From 57f3f36cb1a828bac493c8c7ec03b100335ae410 Mon Sep 17 00:00:00 2001
From: Adrian Catangiu <31917973+acatangiu@users.noreply.github.com>
Date: Mon, 15 Nov 2021 16:14:08 +0200
Subject: [PATCH] Remove unused PoA<>Substrate bridge (#1210)

* Decouple the PoA bridge code from Rialto
* Remove Rialto PoA bridge code
* Remove relays/bin-ethereum code
* Remove relays/client-ethereum code
* Remove modules/ethereum code
* Remove modules/ethereum-contract-builtin code
* Remove PoA bridge documentation
* Remove primitives/ethereum-poa code
* Decouple Rialto from currency-exchange
* Fix building with runtime-benchmarks
* Fix should_encode_bridge_send_message_call test

  Because we removed some runtime modules/pallets, the substrate2substrate
  bridge pallet has a different index within the runtime so its calls have
  a different encoding. Update the test to use the new encoding.

* Update readme - no more PoA bridge
* Remove deployments/bridges/poa-rialto

  Also removes:
  - deployments/networks/eth-poa.yml
  - deployments/networks/OpenEthereum.Dockerfile

* Remove deployments/dev/poa-config
* Update deployments readme - no more PoA bridge
* Remove eth-related scripts

  Deletes:
  - deployments/networks/eth-poa.yml
  - scripts/run-openethereum-node.sh

* Remove poa-relay from gitlab-ci
* Dockerfiles to use substrate-relay as default
* Remove modules/currency-exchange code
* Remove primitives/currency-exchange code

Signed-off-by: acatangiu
---
 bridges/README.md | 7 +-
 bridges/bin/rialto/node/src/chain_spec.rs | 24 +-
 bridges/bin/rialto/runtime/Cargo.toml | 10 -
 bridges/bin/rialto/runtime/src/benches.rs | 38 -
 bridges/bin/rialto/runtime/src/exchange.rs | 259 ---
 bridges/bin/rialto/runtime/src/kovan.rs | 201 ---
 bridges/bin/rialto/runtime/src/lib.rs | 308 ----
 bridges/bin/rialto/runtime/src/rialto_poa.rs | 183 --
 bridges/modules/currency-exchange/Cargo.toml | 50 -
 .../currency-exchange/src/benchmarking.rs | 135 --
 bridges/modules/currency-exchange/src/lib.rs | 514 ------
 .../ethereum-contract-builtin/Cargo.toml | 28 -
 .../ethereum-contract-builtin/src/lib.rs | 372 ----
 bridges/modules/ethereum/Cargo.toml | 51 -
 bridges/modules/ethereum/src/benchmarking.rs | 270 ---
 bridges/modules/ethereum/src/error.rs | 102 --
 bridges/modules/ethereum/src/finality.rs | 557 ------
 bridges/modules/ethereum/src/import.rs | 600 -------
 bridges/modules/ethereum/src/lib.rs | 1572 -----------------
 bridges/modules/ethereum/src/mock.rs | 188 --
 bridges/modules/ethereum/src/test_utils.rs | 322 ----
 bridges/modules/ethereum/src/validators.rs | 458 -----
 bridges/modules/ethereum/src/verification.rs | 972 ----------
 .../primitives/currency-exchange/Cargo.toml | 27 -
 .../primitives/currency-exchange/src/lib.rs | 153 --
 bridges/primitives/ethereum-poa/Cargo.toml | 59 -
 bridges/primitives/ethereum-poa/src/lib.rs | 732 --------
 .../primitives/ethereum-poa/src/signatures.rs | 138 --
 bridges/relays/bin-ethereum/Cargo.toml | 42 -
 bridges/relays/bin-ethereum/README.md | 7 -
 .../res/substrate-bridge-abi.json | 167 --
 .../res/substrate-bridge-bytecode.hex | 1 -
 .../res/substrate-bridge-metadata.txt | 5 -
 bridges/relays/bin-ethereum/src/cli.yml | 166 --
 bridges/relays/bin-ethereum/src/error.rs | 38 -
 .../bin-ethereum/src/ethereum_client.rs | 631 -------
 .../src/ethereum_deploy_contract.rs | 164 --
 .../bin-ethereum/src/ethereum_exchange.rs | 402 -----
 .../src/ethereum_exchange_submit.rs | 110 --
 .../bin-ethereum/src/ethereum_sync_loop.rs | 299 ----
 bridges/relays/bin-ethereum/src/instances.rs | 119 --
 bridges/relays/bin-ethereum/src/main.rs | 424 -----
 .../relays/bin-ethereum/src/rialto_client.rs | 300 ----
 bridges/relays/bin-ethereum/src/rpc_errors.rs | 67 -
 .../bin-ethereum/src/substrate_sync_loop.rs | 204 ---
 .../bin-ethereum/src/substrate_types.rs | 78 -
 .../bin-substrate/src/cli/encode_call.rs | 2 +-
 bridges/relays/client-ethereum/Cargo.toml | 20 -
 bridges/relays/client-ethereum/src/client.rs | 265 ---
 bridges/relays/client-ethereum/src/error.rs | 82 -
 bridges/relays/client-ethereum/src/lib.rs | 47 -
 bridges/relays/client-ethereum/src/rpc.rs | 51 -
 bridges/relays/client-ethereum/src/sign.rs | 87 -
 bridges/relays/client-ethereum/src/types.rs | 80 -
 54 files changed, 7 insertions(+), 12181 deletions(-)
 delete mode 100644 bridges/bin/rialto/runtime/src/benches.rs
 delete mode 100644 bridges/bin/rialto/runtime/src/exchange.rs
 delete mode 100644 bridges/bin/rialto/runtime/src/kovan.rs
 delete mode 100644 bridges/bin/rialto/runtime/src/rialto_poa.rs
 delete mode 100644 bridges/modules/currency-exchange/Cargo.toml
 delete mode 100644 bridges/modules/currency-exchange/src/benchmarking.rs
 delete mode 100644 bridges/modules/currency-exchange/src/lib.rs
 delete mode 100644 bridges/modules/ethereum-contract-builtin/Cargo.toml
 delete mode 100644 bridges/modules/ethereum-contract-builtin/src/lib.rs
 delete mode 100644 bridges/modules/ethereum/Cargo.toml
 delete mode 100644 bridges/modules/ethereum/src/benchmarking.rs
 delete mode 100644 bridges/modules/ethereum/src/error.rs
 delete mode 100644 bridges/modules/ethereum/src/finality.rs
 delete mode 100644 bridges/modules/ethereum/src/import.rs
 delete mode 100644 bridges/modules/ethereum/src/lib.rs
 delete mode 100644 bridges/modules/ethereum/src/mock.rs
 delete mode 100644 bridges/modules/ethereum/src/test_utils.rs
 delete mode 100644 bridges/modules/ethereum/src/validators.rs
 delete mode 100644 bridges/modules/ethereum/src/verification.rs
 delete mode 100644 bridges/primitives/currency-exchange/Cargo.toml
 delete mode 100644 bridges/primitives/currency-exchange/src/lib.rs
 delete mode 100644 bridges/primitives/ethereum-poa/Cargo.toml
 delete mode 100644 bridges/primitives/ethereum-poa/src/lib.rs
 delete mode 100644 bridges/primitives/ethereum-poa/src/signatures.rs
 delete mode 100644 bridges/relays/bin-ethereum/Cargo.toml
 delete mode 100644 bridges/relays/bin-ethereum/README.md
 delete mode 100644 bridges/relays/bin-ethereum/res/substrate-bridge-abi.json
 delete mode 100644 bridges/relays/bin-ethereum/res/substrate-bridge-bytecode.hex
 delete mode 100644 bridges/relays/bin-ethereum/res/substrate-bridge-metadata.txt
 delete mode 100644 bridges/relays/bin-ethereum/src/cli.yml
 delete mode 100644 bridges/relays/bin-ethereum/src/error.rs
 delete mode 100644 bridges/relays/bin-ethereum/src/ethereum_client.rs
 delete mode 100644 bridges/relays/bin-ethereum/src/ethereum_deploy_contract.rs
 delete mode 100644 bridges/relays/bin-ethereum/src/ethereum_exchange.rs
 delete mode 100644 bridges/relays/bin-ethereum/src/ethereum_exchange_submit.rs
 delete mode 100644 bridges/relays/bin-ethereum/src/ethereum_sync_loop.rs
 delete mode 100644 bridges/relays/bin-ethereum/src/instances.rs
 delete mode 100644 bridges/relays/bin-ethereum/src/main.rs
 delete mode 100644 bridges/relays/bin-ethereum/src/rialto_client.rs
 delete mode 100644 bridges/relays/bin-ethereum/src/rpc_errors.rs
 delete mode 100644 bridges/relays/bin-ethereum/src/substrate_sync_loop.rs
 delete mode 100644 bridges/relays/bin-ethereum/src/substrate_types.rs
 delete mode 100644 bridges/relays/client-ethereum/Cargo.toml
 delete mode 100644 bridges/relays/client-ethereum/src/client.rs
 delete
mode 100644 bridges/relays/client-ethereum/src/error.rs delete mode 100644 bridges/relays/client-ethereum/src/lib.rs delete mode 100644 bridges/relays/client-ethereum/src/rpc.rs delete mode 100644 bridges/relays/client-ethereum/src/sign.rs delete mode 100644 bridges/relays/client-ethereum/src/types.rs diff --git a/bridges/README.md b/bridges/README.md index 41d7fec13d5b..ac3e49b94c6a 100644 --- a/bridges/README.md +++ b/bridges/README.md @@ -6,7 +6,7 @@ These components include Substrate pallets for syncing headers, passing arbitrar as libraries for building relayers to provide cross-chain communication capabilities. Three bridge nodes are also available. The nodes can be used to run test networks which bridge other -Substrate chains or Ethereum Proof-of-Authority chains. +Substrate chains. 🚧 The bridges are currently under construction - a hardhat is recommended beyond this point 🚧 @@ -38,7 +38,7 @@ cargo build --all cargo test --all ``` -Also you can build the repo with +Also you can build the repo with [Parity CI Docker image](https://github.com/paritytech/scripts/tree/master/dockerfiles/bridges-ci): ```bash @@ -54,7 +54,7 @@ docker run --rm -it -w /shellhere/parity-bridges-common \ #artifacts can be found in ~/cache/target ``` -If you want to reproduce other steps of CI process you can use the following +If you want to reproduce other steps of CI process you can use the following [guide](https://github.com/paritytech/scripts#reproduce-ci-locally). If you need more information about setting up your development environment Substrate's @@ -104,7 +104,6 @@ the `relays` which are used to pass messages between chains. ├── diagrams // Pretty pictures of the project architecture │ └── ... ├── modules // Substrate Runtime Modules (a.k.a Pallets) -│ ├── ethereum // Ethereum PoA Header Sync Module │ ├── grandpa // On-Chain GRANDPA Light Client │ ├── messages // Cross Chain Message Passing │ ├── dispatch // Target Chain Message Execution diff --git a/bridges/bin/rialto/node/src/chain_spec.rs b/bridges/bin/rialto/node/src/chain_spec.rs index 3ccfa13e74ac..49f77c9bc6a8 100644 --- a/bridges/bin/rialto/node/src/chain_spec.rs +++ b/bridges/bin/rialto/node/src/chain_spec.rs @@ -17,9 +17,9 @@ use bp_rialto::derive_account_from_millau_id; use polkadot_primitives::v1::{AssignmentId, ValidatorId}; use rialto_runtime::{ - AccountId, BabeConfig, BalancesConfig, BridgeKovanConfig, BridgeMillauMessagesConfig, - BridgeRialtoPoaConfig, ConfigurationConfig, GenesisConfig, GrandpaConfig, SessionConfig, - SessionKeys, Signature, SudoConfig, SystemConfig, WASM_BINARY, + AccountId, BabeConfig, BalancesConfig, BridgeMillauMessagesConfig, ConfigurationConfig, + GenesisConfig, GrandpaConfig, SessionConfig, SessionKeys, Signature, SudoConfig, SystemConfig, + WASM_BINARY, }; use serde_json::json; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; @@ -216,8 +216,6 @@ fn testnet_genesis( authorities: Vec::new(), epoch_config: Some(rialto_runtime::BABE_GENESIS_EPOCH_CONFIG), }, - bridge_rialto_poa: load_rialto_poa_bridge_config(), - bridge_kovan: load_kovan_bridge_config(), grandpa: GrandpaConfig { authorities: Vec::new() }, sudo: SudoConfig { key: root_key }, session: SessionConfig { @@ -291,22 +289,6 @@ fn testnet_genesis( } } -fn load_rialto_poa_bridge_config() -> BridgeRialtoPoaConfig { - BridgeRialtoPoaConfig { - initial_header: rialto_runtime::rialto_poa::genesis_header(), - initial_difficulty: 0.into(), - initial_validators: rialto_runtime::rialto_poa::genesis_validators(), - } -} - -fn 
load_kovan_bridge_config() -> BridgeKovanConfig { - BridgeKovanConfig { - initial_header: rialto_runtime::kovan::genesis_header(), - initial_difficulty: 0.into(), - initial_validators: rialto_runtime::kovan::genesis_validators(), - } -} - #[test] fn derived_dave_account_is_as_expected() { let dave = get_account_id_from_seed::("Dave"); diff --git a/bridges/bin/rialto/runtime/Cargo.toml b/bridges/bin/rialto/runtime/Cargo.toml index c0be917c2bcc..36dc436ddca6 100644 --- a/bridges/bin/rialto/runtime/Cargo.toml +++ b/bridges/bin/rialto/runtime/Cargo.toml @@ -17,8 +17,6 @@ serde = { version = "1.0", optional = true, features = ["derive"] } # Bridge dependencies -bp-currency-exchange = { path = "../../../primitives/currency-exchange", default-features = false } -bp-eth-poa = { path = "../../../primitives/ethereum-poa", default-features = false } bp-header-chain = { path = "../../../primitives/header-chain", default-features = false } bp-message-dispatch = { path = "../../../primitives/message-dispatch", default-features = false } bp-messages = { path = "../../../primitives/messages", default-features = false } @@ -26,9 +24,7 @@ bp-millau = { path = "../../../primitives/chain-millau", default-features = fals bp-rialto = { path = "../../../primitives/chain-rialto", default-features = false } bp-runtime = { path = "../../../primitives/runtime", default-features = false } bridge-runtime-common = { path = "../../runtime-common", default-features = false } -pallet-bridge-currency-exchange = { path = "../../../modules/currency-exchange", default-features = false } pallet-bridge-dispatch = { path = "../../../modules/dispatch", default-features = false } -pallet-bridge-eth-poa = { path = "../../../modules/ethereum", default-features = false } pallet-bridge-grandpa = { path = "../../../modules/grandpa", default-features = false } pallet-bridge-messages = { path = "../../../modules/messages", default-features = false } pallet-shift-session-manager = { path = "../../../modules/shift-session-manager", default-features = false } @@ -80,8 +76,6 @@ substrate-wasm-builder = { git = "https://github.com/paritytech/substrate", bran [features] default = ["std"] std = [ - "bp-currency-exchange/std", - "bp-eth-poa/std", "bp-header-chain/std", "bp-message-dispatch/std", "bp-messages/std", @@ -99,9 +93,7 @@ std = [ "pallet-authority-discovery/std", "pallet-babe/std", "pallet-balances/std", - "pallet-bridge-currency-exchange/std", "pallet-bridge-dispatch/std", - "pallet-bridge-eth-poa/std", "pallet-bridge-grandpa/std", "pallet-bridge-messages/std", "pallet-grandpa/std", @@ -137,8 +129,6 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "libsecp256k1", - "pallet-bridge-currency-exchange/runtime-benchmarks", - "pallet-bridge-eth-poa/runtime-benchmarks", "pallet-bridge-messages/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] diff --git a/bridges/bin/rialto/runtime/src/benches.rs b/bridges/bin/rialto/runtime/src/benches.rs deleted file mode 100644 index ce3f84069795..000000000000 --- a/bridges/bin/rialto/runtime/src/benches.rs +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2020-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! We want to use a different validator configuration for benchmarking than what's used in Kovan -//! or in our Rialto test network. However, we can't configure a new validator set on the fly which -//! means we need to wire the runtime together like this - -use pallet_bridge_eth_poa::{ValidatorsConfiguration, ValidatorsSource}; -use sp_std::vec; - -pub use crate::kovan::{ - genesis_header, genesis_validators, BridgeAuraConfiguration, FinalityVotesCachingInterval, - PruningStrategy, -}; - -frame_support::parameter_types! { - pub BridgeValidatorsConfiguration: pallet_bridge_eth_poa::ValidatorsConfiguration = bench_validator_config(); -} - -fn bench_validator_config() -> ValidatorsConfiguration { - ValidatorsConfiguration::Multi(vec![ - (0, ValidatorsSource::List(vec![[1; 20].into()])), - (1, ValidatorsSource::Contract([3; 20].into(), vec![[1; 20].into()])), - ]) -} diff --git a/bridges/bin/rialto/runtime/src/exchange.rs b/bridges/bin/rialto/runtime/src/exchange.rs deleted file mode 100644 index 4e18053e52e3..000000000000 --- a/bridges/bin/rialto/runtime/src/exchange.rs +++ /dev/null @@ -1,259 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Support for PoA -> Substrate native tokens exchange. -//! -//! If you want to exchange native PoA tokens for native Substrate -//! chain tokens, you need to: -//! 1) send some PoA tokens to `LOCK_FUNDS_ADDRESS` address on PoA chain. Data field of -//! the transaction must be SCALE-encoded id of Substrate account that will receive -//! funds on Substrate chain; -//! 2) wait until the 'lock funds' transaction is mined on PoA chain; -//! 3) wait until the block containing the 'lock funds' transaction is finalized on PoA chain; -//! 4) wait until the required PoA header and its finality are provided -//! to the PoA -> Substrate bridge module (it can be provided by you); -//! 5) receive tokens by providing proof-of-inclusion of PoA transaction. - -use bp_currency_exchange::{ - Error as ExchangeError, LockFundsTransaction, MaybeLockFundsTransaction, - Result as ExchangeResult, -}; -use bp_eth_poa::{transaction_decode_rlp, RawTransaction, RawTransactionReceipt}; -use codec::{Decode, Encode}; -use frame_support::RuntimeDebug; -use hex_literal::hex; -use scale_info::TypeInfo; -use sp_std::vec::Vec; - -/// Ethereum address where locked PoA funds must be sent to. 
-pub const LOCK_FUNDS_ADDRESS: [u8; 20] = hex!("DEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEF"); - -/// Ethereum transaction inclusion proof. -#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo)] -pub struct EthereumTransactionInclusionProof { - /// Hash of the block with transaction. - pub block: sp_core::H256, - /// Index of the transaction within the block. - pub index: u64, - /// The proof itself (right now it is all RLP-encoded transactions of the block + - /// RLP-encoded receipts of all transactions of the block). - pub proof: Vec<(RawTransaction, RawTransactionReceipt)>, -} - -/// We uniquely identify transfer by the pair (sender, nonce). -/// -/// The assumption is that this pair will never appear more than once in -/// transactions included into finalized blocks. This is obviously true -/// for any existing eth-like chain (that keep current TX format), because -/// otherwise transaction can be replayed over and over. -#[derive(Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] -pub struct EthereumTransactionTag { - /// Account that has locked funds. - pub account: [u8; 20], - /// Lock transaction nonce. - pub nonce: sp_core::U256, -} - -/// Ethereum transaction from runtime perspective. -pub struct EthTransaction; - -impl MaybeLockFundsTransaction for EthTransaction { - type Transaction = RawTransaction; - type Id = EthereumTransactionTag; - type Recipient = crate::AccountId; - type Amount = crate::Balance; - - fn parse( - raw_tx: &Self::Transaction, - ) -> ExchangeResult> { - let tx = transaction_decode_rlp(raw_tx).map_err(|_| ExchangeError::InvalidTransaction)?; - - // we only accept transactions sending funds directly to the pre-configured address - if tx.unsigned.to != Some(LOCK_FUNDS_ADDRESS.into()) { - log::trace!( - target: "runtime", - "Failed to parse fund locks transaction. Invalid peer recipient: {:?}", - tx.unsigned.to, - ); - - return Err(ExchangeError::InvalidTransaction) - } - - let mut recipient_raw = sp_core::H256::default(); - match tx.unsigned.payload.len() { - 32 => recipient_raw.as_fixed_bytes_mut().copy_from_slice(&tx.unsigned.payload), - len => { - log::trace!( - target: "runtime", - "Failed to parse fund locks transaction. Invalid recipient length: {}", - len, - ); - - return Err(ExchangeError::InvalidRecipient) - }, - } - let amount = tx.unsigned.value.low_u128(); - - if tx.unsigned.value != amount.into() { - log::trace!( - target: "runtime", - "Failed to parse fund locks transaction. Invalid amount: {}", - tx.unsigned.value, - ); - - return Err(ExchangeError::InvalidAmount) - } - - Ok(LockFundsTransaction { - id: EthereumTransactionTag { - account: *tx.sender.as_fixed_bytes(), - nonce: tx.unsigned.nonce, - }, - recipient: crate::AccountId::from(*recipient_raw.as_fixed_bytes()), - amount, - }) - } -} - -/// Prepares everything required to bench claim of funds locked by given transaction. 
-#[cfg(feature = "runtime-benchmarks")] -pub(crate) fn prepare_environment_for_claim, I: 'static>( - transactions: &[(RawTransaction, RawTransactionReceipt)], -) -> bp_eth_poa::H256 { - use bp_eth_poa::compute_merkle_root; - use pallet_bridge_eth_poa::{ - test_utils::{insert_dummy_header, validator_utils::validator, HeaderBuilder}, - BridgeStorage, Storage, - }; - - let mut storage = BridgeStorage::::new(); - let header = HeaderBuilder::with_parent_number_on_runtime::(0) - .transactions_root(compute_merkle_root(transactions.iter().map(|(tx, _)| tx))) - .receipts_root(compute_merkle_root(transactions.iter().map(|(_, receipt)| receipt))) - .sign_by(&validator(0)); - let header_id = header.compute_id(); - insert_dummy_header(&mut storage, header); - storage.finalize_and_prune_headers(Some(header_id), 0); - - header_id.hash -} - -/// Prepare signed ethereum lock-funds transaction. -#[cfg(any(feature = "runtime-benchmarks", test))] -pub(crate) fn prepare_ethereum_transaction( - recipient: &crate::AccountId, - editor: impl Fn(&mut bp_eth_poa::UnsignedTransaction), -) -> (RawTransaction, RawTransactionReceipt) { - use bp_eth_poa::{signatures::SignTransaction, Receipt, TransactionOutcome}; - - // prepare tx for OpenEthereum private dev chain: - // chain id is 0x11 - // sender secret is 0x4d5db4107d237df6a3d58ee5f70ae63d73d7658d4026f2eefd2f204c81682cb7 - let chain_id = 0x11; - let signer = libsecp256k1::SecretKey::parse(&hex!( - "4d5db4107d237df6a3d58ee5f70ae63d73d7658d4026f2eefd2f204c81682cb7" - )) - .unwrap(); - let recipient_raw: &[u8; 32] = recipient.as_ref(); - let mut eth_tx = bp_eth_poa::UnsignedTransaction { - nonce: 0.into(), - to: Some(LOCK_FUNDS_ADDRESS.into()), - value: 100.into(), - gas: 100_000.into(), - gas_price: 100_000.into(), - payload: recipient_raw.to_vec(), - }; - editor(&mut eth_tx); - ( - eth_tx.sign_by(&signer, Some(chain_id)), - Receipt { - outcome: TransactionOutcome::StatusCode(1), - gas_used: Default::default(), - log_bloom: Default::default(), - logs: Vec::new(), - } - .rlp(), - ) -} - -#[cfg(test)] -mod tests { - use super::*; - use hex_literal::hex; - - fn ferdie() -> crate::AccountId { - hex!("1cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c").into() - } - - #[test] - fn valid_transaction_accepted() { - assert_eq!( - EthTransaction::parse(&prepare_ethereum_transaction(&ferdie(), |_| {}).0), - Ok(LockFundsTransaction { - id: EthereumTransactionTag { - account: hex!("00a329c0648769a73afac7f9381e08fb43dbea72"), - nonce: 0.into(), - }, - recipient: ferdie(), - amount: 100, - }), - ); - } - - #[test] - fn invalid_transaction_rejected() { - assert_eq!(EthTransaction::parse(&Vec::new()), Err(ExchangeError::InvalidTransaction),); - } - - #[test] - fn transaction_with_invalid_peer_recipient_rejected() { - assert_eq!( - EthTransaction::parse( - &prepare_ethereum_transaction(&ferdie(), |tx| { - tx.to = None; - }) - .0 - ), - Err(ExchangeError::InvalidTransaction), - ); - } - - #[test] - fn transaction_with_invalid_recipient_rejected() { - assert_eq!( - EthTransaction::parse( - &prepare_ethereum_transaction(&ferdie(), |tx| { - tx.payload.clear(); - }) - .0 - ), - Err(ExchangeError::InvalidRecipient), - ); - } - - #[test] - fn transaction_with_invalid_amount_rejected() { - assert_eq!( - EthTransaction::parse( - &prepare_ethereum_transaction(&ferdie(), |tx| { - tx.value = sp_core::U256::from(u128::max_value()) + sp_core::U256::from(1); - }) - .0 - ), - Err(ExchangeError::InvalidAmount), - ); - } -} diff --git a/bridges/bin/rialto/runtime/src/kovan.rs 
b/bridges/bin/rialto/runtime/src/kovan.rs deleted file mode 100644 index 95b4f8c42f03..000000000000 --- a/bridges/bin/rialto/runtime/src/kovan.rs +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::exchange::EthereumTransactionInclusionProof; - -use bp_eth_poa::{Address, AuraHeader, RawTransaction, U256}; -use bp_header_chain::InclusionProofVerifier; -use frame_support::RuntimeDebug; -use hex_literal::hex; -use pallet_bridge_eth_poa::{ - AuraConfiguration, ChainTime as TChainTime, PruningStrategy as BridgePruningStrategy, - ValidatorsConfiguration, ValidatorsSource, -}; -use sp_std::prelude::*; - -frame_support::parameter_types! { - pub const FinalityVotesCachingInterval: Option = Some(16); - pub BridgeAuraConfiguration: AuraConfiguration = - kovan_aura_configuration(); - pub BridgeValidatorsConfiguration: ValidatorsConfiguration = - kovan_validators_configuration(); -} - -/// Max number of finalized headers to keep. It is equivalent of around 24 hours of -/// finalized blocks on current Kovan chain. -const FINALIZED_HEADERS_TO_KEEP: u64 = 20_000; - -/// Aura engine configuration for Kovan chain. -pub fn kovan_aura_configuration() -> AuraConfiguration { - AuraConfiguration { - empty_steps_transition: u64::max_value(), - strict_empty_steps_transition: 0, - validate_step_transition: 0x16e360, - validate_score_transition: 0x41a3c4, - two_thirds_majority_transition: u64::max_value(), - min_gas_limit: 0x1388.into(), - max_gas_limit: U256::max_value(), - maximum_extra_data_size: 0x20, - } -} - -/// Validators configuration for Kovan chain. -pub fn kovan_validators_configuration() -> ValidatorsConfiguration { - ValidatorsConfiguration::Multi(vec![ - (0, ValidatorsSource::List(genesis_validators())), - ( - 10960440, - ValidatorsSource::List(vec![ - hex!("00D6Cc1BA9cf89BD2e58009741f4F7325BAdc0ED").into(), - hex!("0010f94b296a852aaac52ea6c5ac72e03afd032d").into(), - hex!("00a0a24b9f0e5ec7aa4c7389b8302fd0123194de").into(), - ]), - ), - ( - 10960500, - ValidatorsSource::Contract( - hex!("aE71807C1B0a093cB1547b682DC78316D945c9B8").into(), - vec![ - hex!("d05f7478c6aa10781258c5cc8b4f385fc8fa989c").into(), - hex!("03801efb0efe2a25ede5dd3a003ae880c0292e4d").into(), - hex!("a4df255ecf08bbf2c28055c65225c9a9847abd94").into(), - hex!("596e8221a30bfe6e7eff67fee664a01c73ba3c56").into(), - hex!("faadface3fbd81ce37b0e19c0b65ff4234148132").into(), - ], - ), - ), - ]) -} - -/// Genesis validators set of Kovan chain. -pub fn genesis_validators() -> Vec
{ - vec![ - hex!("00D6Cc1BA9cf89BD2e58009741f4F7325BAdc0ED").into(), - hex!("00427feae2419c15b89d1c21af10d1b6650a4d3d").into(), - hex!("4Ed9B08e6354C70fE6F8CB0411b0d3246b424d6c").into(), - hex!("0020ee4Be0e2027d76603cB751eE069519bA81A1").into(), - hex!("0010f94b296a852aaac52ea6c5ac72e03afd032d").into(), - hex!("007733a1FE69CF3f2CF989F81C7b4cAc1693387A").into(), - hex!("00E6d2b931F55a3f1701c7389d592a7778897879").into(), - hex!("00e4a10650e5a6D6001C38ff8E64F97016a1645c").into(), - hex!("00a0a24b9f0e5ec7aa4c7389b8302fd0123194de").into(), - ] -} - -/// Genesis header of the Kovan chain. -pub fn genesis_header() -> AuraHeader { - AuraHeader { - parent_hash: Default::default(), - timestamp: 0, - number: 0, - author: Default::default(), - transactions_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") - .into(), - uncles_hash: hex!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347") - .into(), - extra_data: vec![], - state_root: hex!("2480155b48a1cea17d67dbfdfaafe821c1d19cdd478c5358e8ec56dec24502b2").into(), - receipts_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") - .into(), - log_bloom: Default::default(), - gas_used: Default::default(), - gas_limit: 6000000.into(), - difficulty: 131072.into(), - seal: vec![ - vec![128], - vec![ - 184, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - ], - } -} - -/// Kovan headers pruning strategy. -/// -/// We do not prune unfinalized headers because exchange module only accepts -/// claims from finalized headers. And if we're pruning unfinalized headers, then -/// some claims may never be accepted. -#[derive(Default, RuntimeDebug)] -pub struct PruningStrategy; - -impl BridgePruningStrategy for PruningStrategy { - fn pruning_upper_bound(&mut self, _best_number: u64, best_finalized_number: u64) -> u64 { - best_finalized_number.saturating_sub(FINALIZED_HEADERS_TO_KEEP) - } -} - -/// PoA Header timestamp verification against `Timestamp` pallet. -#[derive(Default, RuntimeDebug)] -pub struct ChainTime; - -impl TChainTime for ChainTime { - fn is_timestamp_ahead(&self, timestamp: u64) -> bool { - let now = super::Timestamp::now(); - timestamp > now - } -} - -/// The Kovan Blockchain as seen by the runtime. 
-pub struct KovanBlockchain; - -impl InclusionProofVerifier for KovanBlockchain { - type Transaction = RawTransaction; - type TransactionInclusionProof = EthereumTransactionInclusionProof; - - fn verify_transaction_inclusion_proof( - proof: &Self::TransactionInclusionProof, - ) -> Option { - let is_transaction_finalized = crate::BridgeKovan::verify_transaction_finalized( - proof.block, - proof.index, - &proof.proof, - ); - - if !is_transaction_finalized { - return None - } - - proof.proof.get(proof.index as usize).map(|(tx, _)| tx.clone()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn pruning_strategy_keeps_enough_headers() { - assert_eq!( - PruningStrategy::default().pruning_upper_bound(100_000, 10_000), - 0, - "10_000 <= 20_000 => nothing should be pruned yet", - ); - - assert_eq!( - PruningStrategy::default().pruning_upper_bound(100_000, 20_000), - 0, - "20_000 <= 20_000 => nothing should be pruned yet", - ); - - assert_eq!( - PruningStrategy::default().pruning_upper_bound(100_000, 30_000), - 10_000, - "20_000 <= 30_000 => we're ready to prune first 10_000 headers", - ); - } -} diff --git a/bridges/bin/rialto/runtime/src/lib.rs b/bridges/bin/rialto/runtime/src/lib.rs index bf5a8b36ec19..aae9aa21c2fa 100644 --- a/bridges/bin/rialto/runtime/src/lib.rs +++ b/bridges/bin/rialto/runtime/src/lib.rs @@ -30,14 +30,8 @@ #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); -pub mod exchange; - -#[cfg(feature = "runtime-benchmarks")] -pub mod benches; -pub mod kovan; pub mod millau_messages; pub mod parachains; -pub mod rialto_poa; use crate::millau_messages::{ToMillauMessagePayload, WithMillauMessageBridge}; @@ -72,8 +66,6 @@ pub use frame_support::{ pub use frame_system::Call as SystemCall; pub use pallet_balances::Call as BalancesCall; -pub use pallet_bridge_currency_exchange::Call as BridgeCurrencyExchangeCall; -pub use pallet_bridge_eth_poa::Call as BridgeEthPoACall; pub use pallet_bridge_grandpa::Call as BridgeGrandpaMillauCall; pub use pallet_bridge_messages::Call as MessagesCall; pub use pallet_sudo::Call as SudoCall; @@ -253,48 +245,6 @@ impl pallet_babe::Config for Runtime { type WeightInfo = (); } -type RialtoPoA = pallet_bridge_eth_poa::Instance1; -impl pallet_bridge_eth_poa::Config for Runtime { - type AuraConfiguration = rialto_poa::BridgeAuraConfiguration; - type FinalityVotesCachingInterval = rialto_poa::FinalityVotesCachingInterval; - type ValidatorsConfiguration = rialto_poa::BridgeValidatorsConfiguration; - type PruningStrategy = rialto_poa::PruningStrategy; - type ChainTime = rialto_poa::ChainTime; - type OnHeadersSubmitted = (); -} - -type Kovan = pallet_bridge_eth_poa::Instance2; -impl pallet_bridge_eth_poa::Config for Runtime { - type AuraConfiguration = kovan::BridgeAuraConfiguration; - type FinalityVotesCachingInterval = kovan::FinalityVotesCachingInterval; - type ValidatorsConfiguration = kovan::BridgeValidatorsConfiguration; - type PruningStrategy = kovan::PruningStrategy; - type ChainTime = kovan::ChainTime; - type OnHeadersSubmitted = (); -} - -type RialtoCurrencyExchange = pallet_bridge_currency_exchange::Instance1; -impl pallet_bridge_currency_exchange::Config for Runtime { - type OnTransactionSubmitted = (); - type PeerBlockchain = rialto_poa::RialtoBlockchain; - type PeerMaybeLockFundsTransaction = exchange::EthTransaction; - type RecipientsMap = bp_currency_exchange::IdentityRecipients; - type Amount = Balance; - type CurrencyConverter = bp_currency_exchange::IdentityCurrencyConverter; - type DepositInto = 
DepositInto; -} - -type KovanCurrencyExchange = pallet_bridge_currency_exchange::Instance2; -impl pallet_bridge_currency_exchange::Config for Runtime { - type OnTransactionSubmitted = (); - type PeerBlockchain = kovan::KovanBlockchain; - type PeerMaybeLockFundsTransaction = exchange::EthTransaction; - type RecipientsMap = bp_currency_exchange::IdentityRecipients; - type Amount = Balance; - type CurrencyConverter = bp_currency_exchange::IdentityCurrencyConverter; - type DepositInto = DepositInto; -} - impl pallet_bridge_dispatch::Config for Runtime { type Event = Event; type BridgeMessageId = (bp_messages::LaneId, bp_messages::MessageNonce); @@ -307,68 +257,6 @@ impl pallet_bridge_dispatch::Config for Runtime { type AccountIdConverter = bp_rialto::AccountIdConverter; } -pub struct DepositInto; - -impl bp_currency_exchange::DepositInto for DepositInto { - type Recipient = AccountId; - type Amount = Balance; - - fn deposit_into( - recipient: Self::Recipient, - amount: Self::Amount, - ) -> bp_currency_exchange::Result<()> { - // let balances module make all checks for us (it won't allow depositing lower than - // existential deposit, balance overflow, ...) - let deposited = as Currency>::deposit_creating( - &recipient, amount, - ); - - // I'm dropping deposited here explicitly to illustrate the fact that it'll update - // `TotalIssuance` on drop - let deposited_amount = deposited.peek(); - drop(deposited); - - // we have 3 cases here: - // - deposited == amount: success - // - deposited == 0: deposit has failed and no changes to storage were made - // - deposited != 0: (should never happen in practice) deposit has been partially completed - match deposited_amount { - _ if deposited_amount == amount => { - log::trace!( - target: "runtime", - "Deposited {} to {:?}", - amount, - recipient, - ); - - Ok(()) - }, - _ if deposited_amount == 0 => { - log::error!( - target: "runtime", - "Deposit of {} to {:?} has failed", - amount, - recipient, - ); - - Err(bp_currency_exchange::Error::DepositFailed) - }, - _ => { - log::error!( - target: "runtime", - "Deposit of {} to {:?} has partially competed. {} has been deposited", - amount, - recipient, - deposited_amount, - ); - - // we can't return DepositFailed error here, because storage changes were made - Err(bp_currency_exchange::Error::DepositPartiallyFailed) - }, - } - } -} - impl pallet_grandpa::Config for Runtime { type Event = Event; type Call = Call; @@ -578,12 +466,6 @@ construct_runtime!( Grandpa: pallet_grandpa::{Pallet, Call, Storage, Config, Event}, ShiftSessionManager: pallet_shift_session_manager::{Pallet}, - // Eth-PoA chains bridge modules. - BridgeRialtoPoa: pallet_bridge_eth_poa::::{Pallet, Call, Config, Storage, ValidateUnsigned}, - BridgeKovan: pallet_bridge_eth_poa::::{Pallet, Call, Config, Storage, ValidateUnsigned}, - BridgeRialtoCurrencyExchange: pallet_bridge_currency_exchange::::{Pallet, Call}, - BridgeKovanCurrencyExchange: pallet_bridge_currency_exchange::::{Pallet, Call}, - // Millau bridge modules. BridgeMillauGrandpa: pallet_bridge_grandpa::{Pallet, Call, Storage}, BridgeDispatch: pallet_bridge_dispatch::{Pallet, Event}, @@ -693,46 +575,6 @@ impl_runtime_apis! 
{ } } - impl bp_eth_poa::RialtoPoAHeaderApi for Runtime { - fn best_block() -> (u64, bp_eth_poa::H256) { - let best_block = BridgeRialtoPoa::best_block(); - (best_block.number, best_block.hash) - } - - fn finalized_block() -> (u64, bp_eth_poa::H256) { - let finalized_block = BridgeRialtoPoa::finalized_block(); - (finalized_block.number, finalized_block.hash) - } - - fn is_import_requires_receipts(header: bp_eth_poa::AuraHeader) -> bool { - BridgeRialtoPoa::is_import_requires_receipts(header) - } - - fn is_known_block(hash: bp_eth_poa::H256) -> bool { - BridgeRialtoPoa::is_known_block(hash) - } - } - - impl bp_eth_poa::KovanHeaderApi for Runtime { - fn best_block() -> (u64, bp_eth_poa::H256) { - let best_block = BridgeKovan::best_block(); - (best_block.number, best_block.hash) - } - - fn finalized_block() -> (u64, bp_eth_poa::H256) { - let finalized_block = BridgeKovan::finalized_block(); - (finalized_block.number, finalized_block.hash) - } - - fn is_import_requires_receipts(header: bp_eth_poa::AuraHeader) -> bool { - BridgeKovan::is_import_requires_receipts(header) - } - - fn is_known_block(hash: bp_eth_poa::H256) -> bool { - BridgeKovan::is_known_block(hash) - } - } - impl bp_millau::MillauFinalityApi for Runtime { fn best_finalized() -> (bp_millau::BlockNumber, bp_millau::Hash) { let header = BridgeMillauGrandpa::best_finalized(); @@ -744,18 +586,6 @@ impl_runtime_apis! { } } - impl bp_currency_exchange::RialtoCurrencyExchangeApi for Runtime { - fn filter_transaction_proof(proof: exchange::EthereumTransactionInclusionProof) -> bool { - BridgeRialtoCurrencyExchange::filter_transaction_proof(&proof) - } - } - - impl bp_currency_exchange::KovanCurrencyExchangeApi for Runtime { - fn filter_transaction_proof(proof: exchange::EthereumTransactionInclusionProof) -> bool { - BridgeKovanCurrencyExchange::filter_transaction_proof(&proof) - } - } - impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { fn validate_transaction( source: TransactionSource, @@ -1028,17 +858,10 @@ impl_runtime_apis! { use frame_benchmarking::{list_benchmark, Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; - use pallet_bridge_currency_exchange::benchmarking::Pallet as BridgeCurrencyExchangeBench; use pallet_bridge_messages::benchmarking::Pallet as MessagesBench; let mut list = Vec::::new(); - list_benchmark!(list, extra, pallet_bridge_eth_poa, BridgeRialtoPoa); - list_benchmark!( - list, - extra, - pallet_bridge_currency_exchange, BridgeCurrencyExchangeBench:: - ); list_benchmark!(list, extra, pallet_bridge_messages, MessagesBench::); list_benchmark!(list, extra, pallet_bridge_grandpa, BridgeMillauGrandpa); @@ -1068,46 +891,6 @@ impl_runtime_apis! 
{ let mut batches = Vec::::new(); let params = (&config, &whitelist); - use pallet_bridge_currency_exchange::benchmarking::{ - Pallet as BridgeCurrencyExchangeBench, - Config as BridgeCurrencyExchangeConfig, - ProofParams as BridgeCurrencyExchangeProofParams, - }; - - impl BridgeCurrencyExchangeConfig for Runtime { - fn make_proof( - proof_params: BridgeCurrencyExchangeProofParams, - ) -> crate::exchange::EthereumTransactionInclusionProof { - use bp_currency_exchange::DepositInto; - - if proof_params.recipient_exists { - >::DepositInto::deposit_into( - proof_params.recipient.clone(), - ExistentialDeposit::get(), - ).unwrap(); - } - - let (transaction, receipt) = crate::exchange::prepare_ethereum_transaction( - &proof_params.recipient, - |tx| { - // our runtime only supports transactions where data is exactly 32 bytes long - // (receiver key) - // => we are ignoring `transaction_size_factor` here - tx.value = (ExistentialDeposit::get() * 10).into(); - }, - ); - let transactions = sp_std::iter::repeat((transaction, receipt)) - .take(1 + proof_params.proof_size_factor as usize) - .collect::>(); - let block_hash = crate::exchange::prepare_environment_for_claim::(&transactions); - crate::exchange::EthereumTransactionInclusionProof { - block: block_hash, - index: 0, - proof: transactions, - } - } - } - use crate::millau_messages::{ToMillauMessagePayload, WithMillauMessageBridge}; use bp_runtime::messages::DispatchFeePayment; use bridge_runtime_common::messages; @@ -1274,13 +1057,6 @@ impl_runtime_apis! { } } - add_benchmark!(params, batches, pallet_bridge_eth_poa, BridgeRialtoPoa); - add_benchmark!( - params, - batches, - pallet_bridge_currency_exchange, - BridgeCurrencyExchangeBench:: - ); add_benchmark!( params, batches, @@ -1322,48 +1098,8 @@ where #[cfg(test)] mod tests { use super::*; - use bp_currency_exchange::DepositInto; use bridge_runtime_common::messages; - fn run_deposit_into_test(test: impl Fn(AccountId) -> Balance) { - let mut ext: sp_io::TestExternalities = - SystemConfig::default().build_storage::().unwrap().into(); - ext.execute_with(|| { - // initially issuance is zero - assert_eq!( - as Currency>::total_issuance(), - 0, - ); - - // create account - let account: AccountId = [1u8; 32].into(); - let initial_amount = ExistentialDeposit::get(); - let deposited = - as Currency>::deposit_creating( - &account, - initial_amount, - ); - drop(deposited); - assert_eq!( - as Currency>::total_issuance(), - initial_amount, - ); - assert_eq!( - as Currency>::free_balance(&account), - initial_amount, - ); - - // run test - let total_issuance_change = test(account); - - // check that total issuance has changed by `run_deposit_into_test` - assert_eq!( - as Currency>::total_issuance(), - initial_amount + total_issuance_change, - ); - }); - } - #[test] fn ensure_rialto_message_lane_weights_are_correct() { type Weights = pallet_bridge_messages::weights::RialtoWeight; @@ -1405,50 +1141,6 @@ mod tests { ); } - #[test] - fn deposit_into_existing_account_works() { - run_deposit_into_test(|existing_account| { - let initial_amount = - as Currency>::free_balance( - &existing_account, - ); - let additional_amount = 10_000; - >::DepositInto::deposit_into( - existing_account.clone(), - additional_amount, - ) - .unwrap(); - assert_eq!( - as Currency>::free_balance( - &existing_account - ), - initial_amount + additional_amount, - ); - additional_amount - }); - } - - #[test] - fn deposit_into_new_account_works() { - run_deposit_into_test(|_| { - let initial_amount = 0; - let additional_amount = 
ExistentialDeposit::get() + 10_000; - let new_account: AccountId = [42u8; 32].into(); - >::DepositInto::deposit_into( - new_account.clone(), - additional_amount, - ) - .unwrap(); - assert_eq!( - as Currency>::free_balance( - &new_account - ), - initial_amount + additional_amount, - ); - additional_amount - }); - } - #[test] fn call_size() { const MAX_CALL_SIZE: usize = 230; // value from polkadot-runtime tests diff --git a/bridges/bin/rialto/runtime/src/rialto_poa.rs b/bridges/bin/rialto/runtime/src/rialto_poa.rs deleted file mode 100644 index 865ef387d1b4..000000000000 --- a/bridges/bin/rialto/runtime/src/rialto_poa.rs +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright 2020-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Configuration parameters for the Rialto PoA chain. - -use crate::exchange::EthereumTransactionInclusionProof; - -use bp_eth_poa::{Address, AuraHeader, RawTransaction, U256}; -use bp_header_chain::InclusionProofVerifier; -use frame_support::RuntimeDebug; -use hex_literal::hex; -use pallet_bridge_eth_poa::{ - AuraConfiguration, ChainTime as TChainTime, PruningStrategy as TPruningStrategy, - ValidatorsConfiguration, ValidatorsSource, -}; -use sp_std::prelude::*; - -frame_support::parameter_types! { - pub const FinalityVotesCachingInterval: Option = Some(8); - pub BridgeAuraConfiguration: AuraConfiguration = - aura_configuration(); - pub BridgeValidatorsConfiguration: ValidatorsConfiguration = - validators_configuration(); -} - -/// Max number of finalized headers to keep. -const FINALIZED_HEADERS_TO_KEEP: u64 = 5_000; - -/// Aura engine configuration for Rialto chain. -pub fn aura_configuration() -> AuraConfiguration { - AuraConfiguration { - empty_steps_transition: 0xfffffffff, - strict_empty_steps_transition: 0, - validate_step_transition: 0, - validate_score_transition: 0, - two_thirds_majority_transition: u64::max_value(), - min_gas_limit: 0x1388.into(), - max_gas_limit: U256::max_value(), - maximum_extra_data_size: 0x20, - } -} - -/// Validators configuration for Rialto PoA chain. -pub fn validators_configuration() -> ValidatorsConfiguration { - ValidatorsConfiguration::Single(ValidatorsSource::List(genesis_validators())) -} - -/// Genesis validators set of Rialto PoA chain. -pub fn genesis_validators() -> Vec
{ - vec![ - hex!("005e714f896a8b7cede9d38688c1a81de72a58e4").into(), - hex!("007594304039c2937a12220338aab821d819f5a4").into(), - hex!("004e7a39907f090e19b0b80a277e77b72b22e269").into(), - ] -} - -/// Genesis header of the Rialto PoA chain. -/// -/// To obtain genesis header from a running node, invoke: -/// ```bash -/// $ http localhost:8545 jsonrpc=2.0 id=1 method=eth_getBlockByNumber params:='["earliest", false]' -v -/// ``` -pub fn genesis_header() -> AuraHeader { - AuraHeader { - parent_hash: Default::default(), - timestamp: 0, - number: 0, - author: Default::default(), - transactions_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") - .into(), - uncles_hash: hex!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347") - .into(), - extra_data: vec![], - state_root: hex!("a992d04c791620ed7ed96555a80cf0568355bb4bee2656f46899a4372f25f248").into(), - receipts_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") - .into(), - log_bloom: Default::default(), - gas_used: Default::default(), - gas_limit: 0x222222.into(), - difficulty: 0x20000.into(), - seal: vec![vec![0x80], { - let mut vec = vec![0xb8, 0x41]; - vec.resize(67, 0); - vec - }], - } -} - -/// Rialto PoA headers pruning strategy. -/// -/// We do not prune unfinalized headers because exchange module only accepts -/// claims from finalized headers. And if we're pruning unfinalized headers, then -/// some claims may never be accepted. -#[derive(Default, RuntimeDebug)] -pub struct PruningStrategy; - -impl TPruningStrategy for PruningStrategy { - fn pruning_upper_bound(&mut self, _best_number: u64, best_finalized_number: u64) -> u64 { - best_finalized_number.saturating_sub(FINALIZED_HEADERS_TO_KEEP) - } -} - -/// ChainTime provider -#[derive(Default)] -pub struct ChainTime; - -impl TChainTime for ChainTime { - fn is_timestamp_ahead(&self, timestamp: u64) -> bool { - let now = super::Timestamp::now(); - timestamp > now - } -} - -/// The Rialto PoA Blockchain as seen by the runtime. 
-pub struct RialtoBlockchain; - -impl InclusionProofVerifier for RialtoBlockchain { - type Transaction = RawTransaction; - type TransactionInclusionProof = EthereumTransactionInclusionProof; - - fn verify_transaction_inclusion_proof( - proof: &Self::TransactionInclusionProof, - ) -> Option { - let is_transaction_finalized = crate::BridgeRialtoPoa::verify_transaction_finalized( - proof.block, - proof.index, - &proof.proof, - ); - - if !is_transaction_finalized { - return None - } - - proof.proof.get(proof.index as usize).map(|(tx, _)| tx.clone()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn genesis_hash_matches() { - assert_eq!( - genesis_header().compute_hash(), - hex!("1468e1a0fa20d30025a5a0f87e1cced4fdc393b84b7d2850b11ca5863db482cb").into(), - ); - } - - #[test] - fn pruning_strategy_keeps_enough_headers() { - assert_eq!( - PruningStrategy::default().pruning_upper_bound(100_000, 1_000), - 0, - "1_000 <= 5_000 => nothing should be pruned yet", - ); - - assert_eq!( - PruningStrategy::default().pruning_upper_bound(100_000, 5_000), - 0, - "5_000 <= 5_000 => nothing should be pruned yet", - ); - - assert_eq!( - PruningStrategy::default().pruning_upper_bound(100_000, 10_000), - 5_000, - "5_000 <= 10_000 => we're ready to prune first 5_000 headers", - ); - } -} diff --git a/bridges/modules/currency-exchange/Cargo.toml b/bridges/modules/currency-exchange/Cargo.toml deleted file mode 100644 index dc06a342cc8e..000000000000 --- a/bridges/modules/currency-exchange/Cargo.toml +++ /dev/null @@ -1,50 +0,0 @@ -[package] -name = "pallet-bridge-currency-exchange" -description = "A Substrate Runtime module that accepts 'lock funds' transactions from a peer chain and grants an equivalent amount to a the appropriate Substrate account." -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false } -log = { version = "0.4.14", default-features = false } -scale-info = { version = "1.0", default-features = false, features = ["derive"] } -serde = { version = "1.0", optional = true } - -# Bridge dependencies - -bp-currency-exchange = { path = "../../primitives/currency-exchange", default-features = false } -bp-header-chain = { path = "../../primitives/header-chain", default-features = false } - -# Substrate Dependencies - -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } - -[dev-dependencies] -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" } - -[features] -default = ["std"] -std = [ - "bp-currency-exchange/std", - "bp-header-chain/std", - "codec/std", - "frame-benchmarking/std", - "frame-support/std", - "frame-system/std", - "log/std", - "scale-info/std", - "serde", - "sp-runtime/std", - "sp-std/std", -] -runtime-benchmarks = [ - 
"frame-benchmarking", - "sp-std", -] diff --git a/bridges/modules/currency-exchange/src/benchmarking.rs b/bridges/modules/currency-exchange/src/benchmarking.rs deleted file mode 100644 index 813c1bfe884d..000000000000 --- a/bridges/modules/currency-exchange/src/benchmarking.rs +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Exchange module complexity is mostly determined by callbacks, defined by runtime. -//! So we are giving runtime opportunity to prepare environment and construct proof -//! before invoking module calls. - -use super::{ - Call, Config as CurrencyExchangeConfig, InclusionProofVerifier, - Pallet as CurrencyExchangePallet, -}; -use sp_std::prelude::*; - -use frame_benchmarking::{account, benchmarks_instance_pallet}; -use frame_system::RawOrigin; - -const SEED: u32 = 0; -const WORST_TX_SIZE_FACTOR: u32 = 1000; -const WORST_PROOF_SIZE_FACTOR: u32 = 1000; - -/// Pallet we're benchmarking here. -pub struct Pallet, I: 'static>(CurrencyExchangePallet); - -/// Proof benchmarking parameters. -pub struct ProofParams { - /// Funds recipient. - pub recipient: Recipient, - /// When true, recipient must exists before import. - pub recipient_exists: bool, - /// When 0, transaction should have minimal possible size. When this value has non-zero value - /// n, transaction size should be (if possible) near to MIN_SIZE + n * SIZE_FACTOR. - pub transaction_size_factor: u32, - /// When 0, proof should have minimal possible size. When this value has non-zero value n, - /// proof size should be (if possible) near to MIN_SIZE + n * SIZE_FACTOR. - pub proof_size_factor: u32, -} - -/// Config that must be implemented by runtime. -pub trait Config: CurrencyExchangeConfig { - /// Prepare proof for importing exchange transaction. - fn make_proof( - proof_params: ProofParams, - ) -> <>::PeerBlockchain as InclusionProofVerifier>::TransactionInclusionProof; -} - -benchmarks_instance_pallet! { - // Benchmark `import_peer_transaction` extrinsic with the best possible conditions: - // * Proof is the transaction itself. - // * Transaction has minimal size. - // * Recipient account exists. - import_peer_transaction_best_case { - let i in 1..100; - - let recipient: T::AccountId = account("recipient", i, SEED); - let proof = T::make_proof(ProofParams { - recipient: recipient.clone(), - recipient_exists: true, - transaction_size_factor: 0, - proof_size_factor: 0, - }); - }: import_peer_transaction(RawOrigin::Signed(recipient), proof) - - // Benchmark `import_peer_transaction` extrinsic when recipient account does not exists. 
- import_peer_transaction_when_recipient_does_not_exists { - let i in 1..100; - - let recipient: T::AccountId = account("recipient", i, SEED); - let proof = T::make_proof(ProofParams { - recipient: recipient.clone(), - recipient_exists: false, - transaction_size_factor: 0, - proof_size_factor: 0, - }); - }: import_peer_transaction(RawOrigin::Signed(recipient), proof) - - // Benchmark `import_peer_transaction` when transaction size increases. - import_peer_transaction_when_transaction_size_increases { - let i in 1..100; - let n in 1..WORST_TX_SIZE_FACTOR; - - let recipient: T::AccountId = account("recipient", i, SEED); - let proof = T::make_proof(ProofParams { - recipient: recipient.clone(), - recipient_exists: true, - transaction_size_factor: n, - proof_size_factor: 0, - }); - }: import_peer_transaction(RawOrigin::Signed(recipient), proof) - - // Benchmark `import_peer_transaction` when proof size increases. - import_peer_transaction_when_proof_size_increases { - let i in 1..100; - let n in 1..WORST_PROOF_SIZE_FACTOR; - - let recipient: T::AccountId = account("recipient", i, SEED); - let proof = T::make_proof(ProofParams { - recipient: recipient.clone(), - recipient_exists: true, - transaction_size_factor: 0, - proof_size_factor: n, - }); - }: import_peer_transaction(RawOrigin::Signed(recipient), proof) - - // Benchmark `import_peer_transaction` extrinsic with the worst possible conditions: - // * Proof is large. - // * Transaction has large size. - // * Recipient account does not exists. - import_peer_transaction_worst_case { - let i in 1..100; - let m in WORST_TX_SIZE_FACTOR..WORST_TX_SIZE_FACTOR+1; - let n in WORST_PROOF_SIZE_FACTOR..WORST_PROOF_SIZE_FACTOR+1; - - let recipient: T::AccountId = account("recipient", i, SEED); - let proof = T::make_proof(ProofParams { - recipient: recipient.clone(), - recipient_exists: false, - transaction_size_factor: m, - proof_size_factor: n, - }); - }: import_peer_transaction(RawOrigin::Signed(recipient), proof) - -} diff --git a/bridges/modules/currency-exchange/src/lib.rs b/bridges/modules/currency-exchange/src/lib.rs deleted file mode 100644 index 31b789dd97e7..000000000000 --- a/bridges/modules/currency-exchange/src/lib.rs +++ /dev/null @@ -1,514 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Runtime module that allows tokens exchange between two bridged chains. - -#![cfg_attr(not(feature = "std"), no_std)] - -use bp_currency_exchange::{ - CurrencyConverter, DepositInto, Error as ExchangeError, MaybeLockFundsTransaction, - RecipientsMap, -}; -use bp_header_chain::InclusionProofVerifier; -use frame_support::ensure; - -#[cfg(feature = "runtime-benchmarks")] -pub mod benchmarking; - -/// Called when transaction is submitted to the exchange module. 
-pub trait OnTransactionSubmitted { - /// Called when valid transaction is submitted and accepted by the module. - fn on_valid_transaction_submitted(submitter: AccountId); -} - -pub use pallet::*; - -#[frame_support::pallet] -pub mod pallet { - use super::*; - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; - - #[pallet::config] - pub trait Config: frame_system::Config { - /// Handler for transaction submission result. - type OnTransactionSubmitted: OnTransactionSubmitted; - /// Represents the blockchain that we'll be exchanging currency with. - type PeerBlockchain: InclusionProofVerifier; - /// Peer blockchain transaction parser. - type PeerMaybeLockFundsTransaction: MaybeLockFundsTransaction< - Transaction = ::Transaction, - >; - /// Map between blockchains recipients. - type RecipientsMap: RecipientsMap< - PeerRecipient = ::Recipient, - Recipient = Self::AccountId, - >; - /// This blockchain currency amount type. - type Amount; - /// Converter from peer blockchain currency type into current blockchain currency type. - type CurrencyConverter: CurrencyConverter< - SourceAmount = ::Amount, - TargetAmount = Self::Amount, - >; - /// Something that could grant money. - type DepositInto: DepositInto; - } - - #[pallet::pallet] - #[pallet::generate_store(pub(super) trait Store)] - pub struct Pallet(PhantomData<(T, I)>); - - #[pallet::hooks] - impl, I: 'static> Hooks> for Pallet {} - - #[pallet::call] - impl, I: 'static> Pallet { - /// Imports lock fund transaction of the peer blockchain. - #[pallet::weight(0)] // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) - pub fn import_peer_transaction( - origin: OriginFor, - proof: <>::PeerBlockchain as InclusionProofVerifier>::TransactionInclusionProof, - ) -> DispatchResult { - let submitter = frame_system::ensure_signed(origin)?; - - // verify and parse transaction proof - let deposit = prepare_deposit_details::(&proof)?; - - // make sure to update the mapping if we deposit successfully to avoid double spending, - // i.e. whenever `deposit_into` is successful we MUST update `Transfers`. - { - // if any changes were made to the storage, we can't just return error here, because - // otherwise the same proof may be imported again - let deposit_result = - T::DepositInto::deposit_into(deposit.recipient, deposit.amount); - match deposit_result { - Ok(_) => (), - Err(ExchangeError::DepositPartiallyFailed) => (), - Err(error) => return Err(Error::::from(error).into()), - } - Transfers::::insert(&deposit.transfer_id, ()) - } - - // reward submitter for providing valid message - T::OnTransactionSubmitted::on_valid_transaction_submitted(submitter); - - log::trace!( - target: "runtime", - "Completed currency exchange: {:?}", - deposit.transfer_id, - ); - - Ok(()) - } - } - - #[pallet::error] - pub enum Error { - /// Invalid peer blockchain transaction provided. - InvalidTransaction, - /// Peer transaction has invalid amount. - InvalidAmount, - /// Peer transaction has invalid recipient. - InvalidRecipient, - /// Cannot map from peer recipient to this blockchain recipient. - FailedToMapRecipients, - /// Failed to convert from peer blockchain currency to this blockchain currency. - FailedToConvertCurrency, - /// Deposit has failed. - DepositFailed, - /// Deposit has partially failed (changes to recipient account were made). - DepositPartiallyFailed, - /// Transaction is not finalized. - UnfinalizedTransaction, - /// Transaction funds are already claimed. 
- AlreadyClaimed, - } - - /// All transfers that have already been claimed. - #[pallet::storage] - pub(super) type Transfers, I: 'static = ()> = StorageMap< - _, - Blake2_128Concat, - ::Id, - (), - ValueQuery, - >; -} - -impl, I: 'static> Pallet { - /// Returns true if currency exchange module is able to import given transaction proof in - /// its current state. - pub fn filter_transaction_proof( - proof: &::TransactionInclusionProof, - ) -> bool { - if let Err(err) = prepare_deposit_details::(proof) { - log::trace!( - target: "runtime", - "Can't accept exchange transaction: {:?}", - err, - ); - - return false - } - - true - } -} - -impl, I: 'static> From for Error { - fn from(error: ExchangeError) -> Self { - match error { - ExchangeError::InvalidTransaction => Error::InvalidTransaction, - ExchangeError::InvalidAmount => Error::InvalidAmount, - ExchangeError::InvalidRecipient => Error::InvalidRecipient, - ExchangeError::FailedToMapRecipients => Error::FailedToMapRecipients, - ExchangeError::FailedToConvertCurrency => Error::FailedToConvertCurrency, - ExchangeError::DepositFailed => Error::DepositFailed, - ExchangeError::DepositPartiallyFailed => Error::DepositPartiallyFailed, - } - } -} - -impl OnTransactionSubmitted for () { - fn on_valid_transaction_submitted(_: AccountId) {} -} - -/// Exchange deposit details. -struct DepositDetails, I: 'static> { - /// Transfer id. - pub transfer_id: ::Id, - /// Transfer recipient. - pub recipient: ::Recipient, - /// Transfer amount. - pub amount: ::TargetAmount, -} - -/// Verify and parse transaction proof, preparing everything required for importing -/// this transaction proof. -fn prepare_deposit_details, I: 'static>( - proof: &<>::PeerBlockchain as InclusionProofVerifier>::TransactionInclusionProof, -) -> Result, Error> { - // ensure that transaction is included in finalized block that we know of - let transaction = >::PeerBlockchain::verify_transaction_inclusion_proof(proof) - .ok_or(Error::::UnfinalizedTransaction)?; - - // parse transaction - let transaction = >::PeerMaybeLockFundsTransaction::parse(&transaction) - .map_err(Error::::from)?; - let transfer_id = transaction.id; - ensure!(!Transfers::::contains_key(&transfer_id), Error::::AlreadyClaimed); - - // grant recipient - let recipient = T::RecipientsMap::map(transaction.recipient).map_err(Error::::from)?; - let amount = T::CurrencyConverter::convert(transaction.amount).map_err(Error::::from)?; - - Ok(DepositDetails { transfer_id, recipient, amount }) -} - -#[cfg(test)] -mod tests { - // From construct_runtime macro - #![allow(clippy::from_over_into)] - - use super::*; - use bp_currency_exchange::LockFundsTransaction; - use frame_support::{ - assert_noop, assert_ok, construct_runtime, parameter_types, weights::Weight, - }; - use sp_core::H256; - use sp_runtime::{ - testing::Header, - traits::{BlakeTwo256, IdentityLookup}, - Perbill, - }; - - type AccountId = u64; - - const INVALID_TRANSACTION_ID: u64 = 100; - const ALREADY_CLAIMED_TRANSACTION_ID: u64 = 101; - const UNKNOWN_RECIPIENT_ID: u64 = 0; - const INVALID_AMOUNT: u64 = 0; - const MAX_DEPOSIT_AMOUNT: u64 = 1000; - const SUBMITTER: u64 = 2000; - - type RawTransaction = LockFundsTransaction; - - pub struct DummyTransactionSubmissionHandler; - - impl OnTransactionSubmitted for DummyTransactionSubmissionHandler { - fn on_valid_transaction_submitted(submitter: AccountId) { - Transfers::::insert(submitter, ()); - } - } - - pub struct DummyBlockchain; - - impl InclusionProofVerifier for DummyBlockchain { - type Transaction = RawTransaction; 
- type TransactionInclusionProof = (bool, RawTransaction); - - fn verify_transaction_inclusion_proof( - proof: &Self::TransactionInclusionProof, - ) -> Option { - if proof.0 { - Some(proof.1.clone()) - } else { - None - } - } - } - - pub struct DummyTransaction; - - impl MaybeLockFundsTransaction for DummyTransaction { - type Transaction = RawTransaction; - type Id = u64; - type Recipient = AccountId; - type Amount = u64; - - fn parse(tx: &Self::Transaction) -> bp_currency_exchange::Result { - match tx.id { - INVALID_TRANSACTION_ID => Err(ExchangeError::InvalidTransaction), - _ => Ok(tx.clone()), - } - } - } - - pub struct DummyRecipientsMap; - - impl RecipientsMap for DummyRecipientsMap { - type PeerRecipient = AccountId; - type Recipient = AccountId; - - fn map( - peer_recipient: Self::PeerRecipient, - ) -> bp_currency_exchange::Result { - match peer_recipient { - UNKNOWN_RECIPIENT_ID => Err(ExchangeError::FailedToMapRecipients), - _ => Ok(peer_recipient * 10), - } - } - } - - pub struct DummyCurrencyConverter; - - impl CurrencyConverter for DummyCurrencyConverter { - type SourceAmount = u64; - type TargetAmount = u64; - - fn convert(amount: Self::SourceAmount) -> bp_currency_exchange::Result { - match amount { - INVALID_AMOUNT => Err(ExchangeError::FailedToConvertCurrency), - _ => Ok(amount * 10), - } - } - } - - pub struct DummyDepositInto; - - impl DepositInto for DummyDepositInto { - type Recipient = AccountId; - type Amount = u64; - - fn deposit_into( - _recipient: Self::Recipient, - amount: Self::Amount, - ) -> bp_currency_exchange::Result<()> { - match amount { - amount if amount < MAX_DEPOSIT_AMOUNT * 10 => Ok(()), - amount if amount == MAX_DEPOSIT_AMOUNT * 10 => - Err(ExchangeError::DepositPartiallyFailed), - _ => Err(ExchangeError::DepositFailed), - } - } - } - - type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; - type Block = frame_system::mocking::MockBlock; - use crate as pallet_bridge_currency_exchange; - - construct_runtime! { - pub enum TestRuntime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Exchange: pallet_bridge_currency_exchange::{Pallet}, - } - } - - parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - } - - impl frame_system::Config for TestRuntime { - type Origin = Origin; - type Index = u64; - type Call = Call; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type SS58Prefix = (); - type OnSetCode = (); - } - - impl Config for TestRuntime { - type OnTransactionSubmitted = DummyTransactionSubmissionHandler; - type PeerBlockchain = DummyBlockchain; - type PeerMaybeLockFundsTransaction = DummyTransaction; - type RecipientsMap = DummyRecipientsMap; - type Amount = u64; - type CurrencyConverter = DummyCurrencyConverter; - type DepositInto = DummyDepositInto; - } - - fn new_test_ext() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - sp_io::TestExternalities::new(t) - } - - fn transaction(id: u64) -> RawTransaction { - RawTransaction { id, recipient: 1, amount: 2 } - } - - #[test] - fn unfinalized_transaction_rejected() { - new_test_ext().execute_with(|| { - assert_noop!( - Exchange::import_peer_transaction( - Origin::signed(SUBMITTER), - (false, transaction(0)) - ), - Error::::UnfinalizedTransaction, - ); - }); - } - - #[test] - fn invalid_transaction_rejected() { - new_test_ext().execute_with(|| { - assert_noop!( - Exchange::import_peer_transaction( - Origin::signed(SUBMITTER), - (true, transaction(INVALID_TRANSACTION_ID)), - ), - Error::::InvalidTransaction, - ); - }); - } - - #[test] - fn claimed_transaction_rejected() { - new_test_ext().execute_with(|| { - ::Transfers::insert(ALREADY_CLAIMED_TRANSACTION_ID, ()); - assert_noop!( - Exchange::import_peer_transaction( - Origin::signed(SUBMITTER), - (true, transaction(ALREADY_CLAIMED_TRANSACTION_ID)), - ), - Error::::AlreadyClaimed, - ); - }); - } - - #[test] - fn transaction_with_unknown_recipient_rejected() { - new_test_ext().execute_with(|| { - let mut transaction = transaction(0); - transaction.recipient = UNKNOWN_RECIPIENT_ID; - assert_noop!( - Exchange::import_peer_transaction(Origin::signed(SUBMITTER), (true, transaction)), - Error::::FailedToMapRecipients, - ); - }); - } - - #[test] - fn transaction_with_invalid_amount_rejected() { - new_test_ext().execute_with(|| { - let mut transaction = transaction(0); - transaction.amount = INVALID_AMOUNT; - assert_noop!( - Exchange::import_peer_transaction(Origin::signed(SUBMITTER), (true, transaction)), - Error::::FailedToConvertCurrency, - ); - }); - } - - #[test] - fn transaction_with_invalid_deposit_rejected() { - new_test_ext().execute_with(|| { - let mut transaction = transaction(0); - transaction.amount = MAX_DEPOSIT_AMOUNT + 1; - assert_noop!( - Exchange::import_peer_transaction(Origin::signed(SUBMITTER), (true, transaction)), - Error::::DepositFailed, - ); - }); - } - - #[test] - fn valid_transaction_accepted_even_if_deposit_partially_fails() { - new_test_ext().execute_with(|| { - let mut transaction = transaction(0); - transaction.amount = MAX_DEPOSIT_AMOUNT; - 
assert_ok!(Exchange::import_peer_transaction( - Origin::signed(SUBMITTER), - (true, transaction), - ),); - - // ensure that the transfer has been marked as completed - assert!(::Transfers::contains_key(0u64)); - // ensure that submitter has been rewarded - assert!(::Transfers::contains_key(SUBMITTER)); - }); - } - - #[test] - fn valid_transaction_accepted() { - new_test_ext().execute_with(|| { - assert_ok!(Exchange::import_peer_transaction( - Origin::signed(SUBMITTER), - (true, transaction(0)), - ),); - - // ensure that the transfer has been marked as completed - assert!(::Transfers::contains_key(0u64)); - // ensure that submitter has been rewarded - assert!(::Transfers::contains_key(SUBMITTER)); - }); - } -} diff --git a/bridges/modules/ethereum-contract-builtin/Cargo.toml b/bridges/modules/ethereum-contract-builtin/Cargo.toml deleted file mode 100644 index ffb98bc6bd85..000000000000 --- a/bridges/modules/ethereum-contract-builtin/Cargo.toml +++ /dev/null @@ -1,28 +0,0 @@ -[package] -name = "ethereum-contract-builtin" -description = "Small crate that helps Solidity contract to verify finality proof." -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "2.2.0" } -ethereum-types = "0.12.0" -finality-grandpa = "0.14.0" -hex = "0.4" -log = "0.4.14" - -# Runtime/chain specific dependencies - -rialto-runtime = { path = "../../bin/rialto/runtime" } - -# Substrate Dependencies - -sc-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } - -[dev-dependencies] -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/bridges/modules/ethereum-contract-builtin/src/lib.rs b/bridges/modules/ethereum-contract-builtin/src/lib.rs deleted file mode 100644 index 4a830f8e0a38..000000000000 --- a/bridges/modules/ethereum-contract-builtin/src/lib.rs +++ /dev/null @@ -1,372 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use codec::{Decode, Encode}; -use ethereum_types::U256; -use finality_grandpa::voter_set::VoterSet; -use rialto_runtime::{Block, BlockNumber, Hash, Header as RuntimeHeader}; -use sp_blockchain::Error as ClientError; -use sp_finality_grandpa::{AuthorityList, ConsensusLog, GRANDPA_ENGINE_ID}; - -/// Builtin errors. -#[derive(Debug)] -pub enum Error { - /// Failed to decode block number. - BlockNumberDecode, - /// Failed to decode Substrate header. 
- HeaderDecode(codec::Error), - /// Failed to decode the best voters set. - BestSetDecode(codec::Error), - /// The best voters set is invalid. - InvalidBestSet, - /// Failed to decode finality proof. - FinalityProofDecode(codec::Error), - /// Failed to verify justification. - JustificationVerify(Box), -} - -/// Substrate header. -#[derive(Debug, PartialEq)] -pub struct Header { - /// Header hash. - pub hash: Hash, - /// Parent header hash. - pub parent_hash: Hash, - /// Header number. - pub number: BlockNumber, - /// GRANDPA validators change signal. - pub signal: Option, -} - -/// GRANDPA validators set change signal. -#[derive(Debug, PartialEq)] -pub struct ValidatorsSetSignal { - /// Signal delay. - pub delay: BlockNumber, - /// New validators set. - pub validators: Vec, -} - -/// Convert from U256 to BlockNumber. Fails if `U256` value isn't fitting within `BlockNumber` -/// limits (the runtime referenced by this module uses u32 as `BlockNumber`). -pub fn to_substrate_block_number(number: U256) -> Result { - let substrate_block_number = match number == number.low_u32().into() { - true => Ok(number.low_u32()), - false => Err(Error::BlockNumberDecode), - }; - - log::trace!( - target: "bridge-builtin", - "Parsed Substrate block number from {}: {:?}", - number, - substrate_block_number, - ); - - substrate_block_number -} - -/// Convert from BlockNumber to U256. -pub fn from_substrate_block_number(number: BlockNumber) -> Result { - Ok(U256::from(number as u64)) -} - -/// Parse Substrate header. -pub fn parse_substrate_header(raw_header: &[u8]) -> Result { - let substrate_header = RuntimeHeader::decode(&mut &*raw_header) - .map(|header| Header { - hash: header.hash(), - parent_hash: header.parent_hash, - number: header.number, - signal: sp_runtime::traits::Header::digest(&header) - .log(|log| { - log.as_consensus().and_then(|(engine_id, log)| { - if engine_id == GRANDPA_ENGINE_ID { - Some(log) - } else { - None - } - }) - }) - .and_then(|log| ConsensusLog::decode(&mut &*log).ok()) - .and_then(|log| match log { - ConsensusLog::ScheduledChange(scheduled_change) => Some(ValidatorsSetSignal { - delay: scheduled_change.delay, - validators: scheduled_change.next_authorities.encode(), - }), - _ => None, - }), - }) - .map_err(Error::HeaderDecode); - - log::debug!( - target: "bridge-builtin", - "Parsed Substrate header {}: {:?}", - if substrate_header.is_ok() { - format!("<{}-bytes-blob>", raw_header.len()) - } else { - hex::encode(raw_header) - }, - substrate_header, - ); - - substrate_header -} - -/// Verify GRANDPA finality proof. 
-pub fn verify_substrate_finality_proof( - finality_target_number: BlockNumber, - finality_target_hash: Hash, - best_set_id: u64, - raw_best_set: &[u8], - raw_finality_proof: &[u8], -) -> Result<(), Error> { - let best_set = AuthorityList::decode(&mut &*raw_best_set) - .map_err(Error::BestSetDecode) - .and_then(|authorities| { - VoterSet::new(authorities.into_iter()).ok_or(Error::InvalidBestSet) - }); - - log::debug!( - target: "bridge-builtin", - "Parsed Substrate authorities set {}: {:?}", - if best_set.is_ok() { - format!("<{}-bytes-blob>", raw_best_set.len()) - } else { - hex::encode(raw_best_set) - }, - best_set, - ); - - let best_set = best_set?; - - let verify_result = - sc_finality_grandpa::GrandpaJustification::::decode_and_verify_finalizes( - raw_finality_proof, - (finality_target_hash, finality_target_number), - best_set_id, - &best_set, - ) - .map_err(Box::new) - .map_err(Error::JustificationVerify) - .map(|_| ()); - - log::debug!( - target: "bridge-builtin", - "Verified Substrate finality proof {}: {:?}", - if verify_result.is_ok() { - format!("<{}-bytes-blob>", raw_finality_proof.len()) - } else { - hex::encode(raw_finality_proof) - }, - verify_result, - ); - - verify_result -} - -#[cfg(test)] -mod tests { - use super::*; - use rialto_runtime::DigestItem; - use sp_core::crypto::Public; - use sp_finality_grandpa::{AuthorityId, ScheduledChange}; - use sp_runtime::generic::Digest; - - #[test] - fn to_substrate_block_number_succeeds() { - assert_eq!(to_substrate_block_number(U256::zero()).unwrap(), 0); - assert_eq!( - to_substrate_block_number(U256::from(std::u32::MAX as u64)).unwrap(), - 0xFFFFFFFF - ); - } - - #[test] - fn to_substrate_block_number_fails() { - assert!(matches!( - to_substrate_block_number(U256::from(std::u32::MAX as u64 + 1)), - Err(Error::BlockNumberDecode) - )); - } - - #[test] - fn from_substrate_block_number_succeeds() { - assert_eq!(from_substrate_block_number(0).unwrap(), U256::zero()); - assert_eq!(from_substrate_block_number(std::u32::MAX).unwrap(), U256::from(std::u32::MAX)); - } - - #[test] - fn substrate_header_without_signal_parsed() { - let raw_header = RuntimeHeader { - parent_hash: [0u8; 32].into(), - number: 0, - state_root: "b2fc47904df5e355c6ab476d89fbc0733aeddbe302f0b94ba4eea9283f7e89e7" - .parse() - .unwrap(), - extrinsics_root: "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314" - .parse() - .unwrap(), - digest: Default::default(), - } - .encode(); - assert_eq!( - raw_header, - hex::decode("000000000000000000000000000000000000000000000000000000000000000000b2fc47904df5e355c6ab476d89fbc0733aeddbe302f0b94ba4eea9283f7e89e703170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c11131400").unwrap(), - ); - - assert_eq!( - parse_substrate_header(&raw_header).unwrap(), - Header { - hash: "afbbeb92bf6ff14f60bdef0aa89f043dd403659ae82665238810ace0d761f6d0" - .parse() - .unwrap(), - parent_hash: Default::default(), - number: 0, - signal: None, - }, - ); - } - - #[test] - fn substrate_header_with_signal_parsed() { - let authorities = vec![ - (AuthorityId::from_slice(&[1; 32]), 101), - (AuthorityId::from_slice(&[3; 32]), 103), - ]; - let mut digest = Digest::default(); - digest.push(DigestItem::Consensus( - GRANDPA_ENGINE_ID, - ConsensusLog::ScheduledChange(ScheduledChange { - next_authorities: authorities.clone(), - delay: 8, - }) - .encode(), - )); - - let raw_header = RuntimeHeader { - parent_hash: "c0ac300d4005141ea690f3df593e049739c227316eb7f05052f3ee077388b68b" - .parse() - .unwrap(), - number: 8, - state_root: 
"822d6b412033aa9ac8e1722918eec5f25633529225754b3d4149982f5cacd4aa" - .parse() - .unwrap(), - extrinsics_root: "e7b07c0ce2799416ce7877b9cefc7f596bea5e8813bb2a0abf760414073ca928" - .parse() - .unwrap(), - digest, - } - .encode(); - assert_eq!( - raw_header, - hex::decode("c0ac300d4005141ea690f3df593e049739c227316eb7f05052f3ee077388b68b20822d6b412033aa9ac8e1722918eec5f25633529225754b3d4149982f5cacd4aae7b07c0ce2799416ce7877b9cefc7f596bea5e8813bb2a0abf760414073ca928040446524e4b59010108010101010101010101010101010101010101010101010101010101010101010165000000000000000303030303030303030303030303030303030303030303030303030303030303670000000000000008000000").unwrap(), - ); - - assert_eq!( - parse_substrate_header(&raw_header).unwrap(), - Header { - hash: "3dfebb280bd87a4640f89d7f2adecd62b88148747bff5b63af6e1634ee37a56e" - .parse() - .unwrap(), - parent_hash: "c0ac300d4005141ea690f3df593e049739c227316eb7f05052f3ee077388b68b" - .parse() - .unwrap(), - number: 8, - signal: Some(ValidatorsSetSignal { delay: 8, validators: authorities.encode() }), - }, - ); - } - - /// Number of the example block with justification. - const EXAMPLE_JUSTIFIED_BLOCK_NUMBER: u32 = 8; - /// Hash of the example block with justification. - const EXAMPLE_JUSTIFIED_BLOCK_HASH: &str = - "a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f343775"; - /// Id of authorities set that have generated example justification. Could be computed by - /// tracking every set change in canonized headers. - const EXAMPLE_AUTHORITIES_SET_ID: u64 = 0; - /// Encoded authorities set that has generated example justification. Could be fetched from - /// `ScheduledChange` digest of the block that has scheduled this set OR by calling - /// `GrandpaApi::grandpa_authorities()` at appropriate block. - const EXAMPLE_AUTHORITIES_SET: &str = "1488dc3417d5058ec4b4503e0c12ea1a0a89be200fe98922423d4334014fa6b0ee0100000000000000d17c2d7823ebf260fd138f2d7e27d114c0145d968b5ff5006125f2414fadae690100000000000000439660b36c6c03afafca027b910b4fecf99801834c62a5e6006f27d978de234f01000000000000005e639b43e0052c47447dac87d6fd2b6ec50bdd4d0f614e4299c665249bbd09d901000000000000001dfe3e22cc0d45c70779c1095f7489a8ef3cf52d62fbd8c2fa38c9f1723502b50100000000000000"; - /// Example justification. Could be fetched by calling 'chain_getBlock' RPC. 
- const EXAMPLE_JUSTIFICATION: &str = "2600000000000000a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f3437750800000010a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f34377508000000d66b4ceb57ef8bcbc955071b597c8c5d2adcfdbb009c73f8438d342670fdeca9ac60686cbd58105b10f51d0a64a8e73b2e5829b2eab3248a008c472852130b00439660b36c6c03afafca027b910b4fecf99801834c62a5e6006f27d978de234fa2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f34377508000000f5730c14d3cd22b7661e2f5fcb3139dd5fef37f946314a441d01b40ce1200ef70d810525f23fd278b588cd67473c200bda83c338c407b479386aa83798e5970b5e639b43e0052c47447dac87d6fd2b6ec50bdd4d0f614e4299c665249bbd09d9a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f34377508000000c78d6ec463f476461a695b4791d30e7626d16fdf72d7c252c2cad387495a97e8c2827ed4d5af853d6e05d31cb6fb7438c9481a7e9c6990d60a9bfaf6a6e1930988dc3417d5058ec4b4503e0c12ea1a0a89be200fe98922423d4334014fa6b0eea2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f3437750800000052b4fc52d430286b3e2d650aa6e01b6ff4fae8b968893a62be789209eb97ee6e23780d3f5af7042d85bb48f1b202890b22724dfebce138826f66a5e00324320fd17c2d7823ebf260fd138f2d7e27d114c0145d968b5ff5006125f2414fadae6900"; - - #[test] - fn substrate_header_parse_fails() { - assert!(matches!(parse_substrate_header(&[]), Err(_))); - } - - #[test] - fn verify_substrate_finality_proof_succeeds() { - verify_substrate_finality_proof( - EXAMPLE_JUSTIFIED_BLOCK_NUMBER, - EXAMPLE_JUSTIFIED_BLOCK_HASH.parse().unwrap(), - EXAMPLE_AUTHORITIES_SET_ID, - &hex::decode(EXAMPLE_AUTHORITIES_SET).unwrap(), - &hex::decode(EXAMPLE_JUSTIFICATION).unwrap(), - ) - .unwrap(); - } - - #[test] - fn verify_substrate_finality_proof_fails_when_wrong_block_is_finalized() { - verify_substrate_finality_proof( - 4, - Default::default(), - EXAMPLE_AUTHORITIES_SET_ID, - &hex::decode(EXAMPLE_AUTHORITIES_SET).unwrap(), - &hex::decode(EXAMPLE_JUSTIFICATION).unwrap(), - ) - .unwrap_err(); - } - - #[test] - fn verify_substrate_finality_proof_fails_when_wrong_set_is_provided() { - verify_substrate_finality_proof( - EXAMPLE_JUSTIFIED_BLOCK_NUMBER, - EXAMPLE_JUSTIFIED_BLOCK_HASH.parse().unwrap(), - EXAMPLE_AUTHORITIES_SET_ID, - &hex::decode("deadbeef").unwrap(), - &hex::decode(EXAMPLE_JUSTIFICATION).unwrap(), - ) - .unwrap_err(); - } - - #[test] - fn verify_substrate_finality_proof_fails_when_wrong_set_id_is_provided() { - verify_substrate_finality_proof( - EXAMPLE_JUSTIFIED_BLOCK_NUMBER, - EXAMPLE_JUSTIFIED_BLOCK_HASH.parse().unwrap(), - 42, - &hex::decode(EXAMPLE_AUTHORITIES_SET).unwrap(), - &hex::decode(EXAMPLE_JUSTIFICATION).unwrap(), - ) - .unwrap_err(); - } - - #[test] - fn verify_substrate_finality_proof_fails_when_wrong_proof_is_provided() { - verify_substrate_finality_proof( - EXAMPLE_JUSTIFIED_BLOCK_NUMBER, - EXAMPLE_JUSTIFIED_BLOCK_HASH.parse().unwrap(), - 0, - &hex::decode(EXAMPLE_AUTHORITIES_SET).unwrap(), - &hex::decode("deadbeef").unwrap(), - ) - .unwrap_err(); - } -} diff --git a/bridges/modules/ethereum/Cargo.toml b/bridges/modules/ethereum/Cargo.toml deleted file mode 100644 index baa933611807..000000000000 --- a/bridges/modules/ethereum/Cargo.toml +++ /dev/null @@ -1,51 +0,0 @@ -[package] -name = "pallet-bridge-eth-poa" -description = "A Substrate Runtime module that is able to verify PoA headers and their finality." 
-version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false } -libsecp256k1 = { version = "0.7", default-features = false, features = ["hmac"], optional = true } -log = { version = "0.4.14", default-features = false } -scale-info = { version = "1.0", default-features = false, features = ["derive"] } -serde = { version = "1.0", optional = true } - -# Bridge dependencies - -bp-eth-poa = { path = "../../primitives/ethereum-poa", default-features = false } - -# Substrate Dependencies - -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } - -[dev-dependencies] -libsecp256k1 = { version = "0.7", features = ["hmac"] } -hex-literal = "0.3" - -[features] -default = ["std"] -std = [ - "bp-eth-poa/std", - "codec/std", - "frame-benchmarking/std", - "frame-support/std", - "frame-system/std", - "log/std", - "scale-info/std", - "serde", - "sp-io/std", - "sp-runtime/std", - "sp-std/std", -] -runtime-benchmarks = [ - "frame-benchmarking", - "libsecp256k1", -] diff --git a/bridges/modules/ethereum/src/benchmarking.rs b/bridges/modules/ethereum/src/benchmarking.rs deleted file mode 100644 index 511cbcac1ade..000000000000 --- a/bridges/modules/ethereum/src/benchmarking.rs +++ /dev/null @@ -1,270 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use super::*; - -use crate::test_utils::{ - build_custom_header, build_genesis_header, insert_header, validator_utils::*, - validators_change_receipt, HeaderBuilder, -}; - -use bp_eth_poa::{compute_merkle_root, U256}; -use frame_benchmarking::benchmarks_instance_pallet; -use frame_system::RawOrigin; - -benchmarks_instance_pallet! { - // Benchmark `import_unsigned_header` extrinsic with the best possible conditions: - // * Parent header is finalized. - // * New header doesn't require receipts. - // * Nothing is finalized by new header. - // * Nothing is pruned by new header. 
- import_unsigned_header_best_case { - let n in 1..1000; - - let num_validators = 2; - let initial_header = initialize_bench::(num_validators); - - // prepare header to be inserted - let header = build_custom_header( - &validator(1), - &initial_header, - |mut header| { - header.gas_limit = header.gas_limit + U256::from(n); - header - }, - ); - }: import_unsigned_header(RawOrigin::None, Box::new(header), None) - verify { - let storage = BridgeStorage::::new(); - assert_eq!(storage.best_block().0.number, 1); - assert_eq!(storage.finalized_block().number, 0); - } - - // Our goal with this bench is to try and see the effect that finalizing difference ranges of - // blocks has on our import time. As such we need to make sure that we keep the number of - // validators fixed while changing the number blocks finalized (the complexity parameter) by - // importing the last header. - // - // One important thing to keep in mind is that the runtime provides a finality cache in order to - // reduce the overhead of header finalization. However, this is only triggered every 16 blocks. - import_unsigned_finality { - // Our complexity parameter, n, will represent the number of blocks imported before - // finalization. - let n in 1..7; - - let mut storage = BridgeStorage::::new(); - let num_validators: u32 = 2; - let initial_header = initialize_bench::(num_validators as usize); - - // Since we only have two validators we need to make sure the number of blocks is even to - // make sure the right validator signs the final block - let num_blocks = 2 * n; - let mut headers = Vec::new(); - let mut parent = initial_header.clone(); - - // Import a bunch of headers without any verification, will ensure that they're not - // finalized prematurely - for i in 1..=num_blocks { - let header = HeaderBuilder::with_parent(&parent).sign_by(&validator(0)); - let id = header.compute_id(); - insert_header(&mut storage, header.clone()); - headers.push(header.clone()); - parent = header; - } - - let last_header = headers.last().unwrap().clone(); - let last_authority = validator(1); - - // Need to make sure that the header we're going to import hasn't been inserted - // into storage already - let header = HeaderBuilder::with_parent(&last_header).sign_by(&last_authority); - }: import_unsigned_header(RawOrigin::None, Box::new(header), None) - verify { - let storage = BridgeStorage::::new(); - assert_eq!(storage.best_block().0.number, (num_blocks + 1) as u64); - assert_eq!(storage.finalized_block().number, num_blocks as u64); - } - - // Basically the exact same as `import_unsigned_finality` but with a different range for the - // complexity parameter. In this bench we use a larger range of blocks to see how performance - // changes when the finality cache kicks in (>16 blocks). - import_unsigned_finality_with_cache { - // Our complexity parameter, n, will represent the number of blocks imported before - // finalization. 
- let n in 7..100; - - let mut storage = BridgeStorage::::new(); - let num_validators: u32 = 2; - let initial_header = initialize_bench::(num_validators as usize); - - // Since we only have two validators we need to make sure the number of blocks is even to - // make sure the right validator signs the final block - let num_blocks = 2 * n; - let mut headers = Vec::new(); - let mut parent = initial_header.clone(); - - // Import a bunch of headers without any verification, will ensure that they're not - // finalized prematurely - for i in 1..=num_blocks { - let header = HeaderBuilder::with_parent(&parent).sign_by(&validator(0)); - let id = header.compute_id(); - insert_header(&mut storage, header.clone()); - headers.push(header.clone()); - parent = header; - } - - let last_header = headers.last().unwrap().clone(); - let last_authority = validator(1); - - // Need to make sure that the header we're going to import hasn't been inserted - // into storage already - let header = HeaderBuilder::with_parent(&last_header).sign_by(&last_authority); - }: import_unsigned_header(RawOrigin::None, Box::new(header), None) - verify { - let storage = BridgeStorage::::new(); - assert_eq!(storage.best_block().0.number, (num_blocks + 1) as u64); - assert_eq!(storage.finalized_block().number, num_blocks as u64); - } - - // A block import may trigger a pruning event, which adds extra work to the import progress. - // In this bench we trigger a pruning event in order to see how much extra time is spent by the - // runtime dealing with it. In the Ethereum Pallet, we're limited pruning to eight blocks in a - // single import, as dictated by MAX_BLOCKS_TO_PRUNE_IN_SINGLE_IMPORT. - import_unsigned_pruning { - let n in 1..MAX_BLOCKS_TO_PRUNE_IN_SINGLE_IMPORT as u32; - - let mut storage = BridgeStorage::::new(); - - let num_validators = 3; - let initial_header = initialize_bench::(num_validators as usize); - let validators = validators(num_validators); - - // Want to prune eligible blocks between [0, n) - BlocksToPrune::::put(PruningRange { - oldest_unpruned_block: 0, - oldest_block_to_keep: n as u64, - }); - - let mut parent = initial_header; - for i in 1..=n { - let header = HeaderBuilder::with_parent(&parent).sign_by_set(&validators); - let id = header.compute_id(); - insert_header(&mut storage, header.clone()); - parent = header; - } - - let header = HeaderBuilder::with_parent(&parent).sign_by_set(&validators); - }: import_unsigned_header(RawOrigin::None, Box::new(header), None) - verify { - let storage = BridgeStorage::::new(); - let max_pruned: u64 = (n - 1) as _; - assert_eq!(storage.best_block().0.number, (n + 1) as u64); - assert!(HeadersByNumber::::get(&0).is_none()); - assert!(HeadersByNumber::::get(&max_pruned).is_none()); - } - - // The goal of this bench is to import a block which contains a transaction receipt. The receipt - // will contain a validator set change. Verifying the receipt root is an expensive operation to - // do, which is why we're interested in benchmarking it. - import_unsigned_with_receipts { - let n in 1..100; - - let mut storage = BridgeStorage::::new(); - - let num_validators = 1; - let initial_header = initialize_bench::(num_validators as usize); - - let mut receipts = vec![]; - for i in 1..=n { - let receipt = validators_change_receipt(Default::default()); - receipts.push(receipt) - } - let encoded_receipts = receipts.iter().map(|r| r.rlp()); - - // We need this extra header since this is what signals a validator set transition. 
This - // will ensure that the next header is within the "Contract" window - let header1 = HeaderBuilder::with_parent(&initial_header).sign_by(&validator(0)); - insert_header(&mut storage, header1.clone()); - - let header = build_custom_header( - &validator(0), - &header1, - |mut header| { - // Logs Bloom signals a change in validator set - header.log_bloom = (&[0xff; 256]).into(); - header.receipts_root = compute_merkle_root(encoded_receipts); - header - }, - ); - }: import_unsigned_header(RawOrigin::None, Box::new(header), Some(receipts)) - verify { - let storage = BridgeStorage::::new(); - assert_eq!(storage.best_block().0.number, 2); - } -} - -fn initialize_bench, I: 'static>(num_validators: usize) -> AuraHeader { - // Initialize storage with some initial header - let initial_header = build_genesis_header(&validator(0)); - let initial_difficulty = initial_header.difficulty; - let initial_validators = validators_addresses(num_validators as usize); - - initialize_storage::(&initial_header, initial_difficulty, &initial_validators); - - initial_header -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::{run_test, TestRuntime}; - use frame_support::assert_ok; - - #[test] - fn insert_unsigned_header_best_case() { - run_test(1, |_| { - assert_ok!(test_benchmark_import_unsigned_header_best_case::()); - }); - } - - #[test] - fn insert_unsigned_header_finality() { - run_test(1, |_| { - assert_ok!(test_benchmark_import_unsigned_finality::()); - }); - } - - #[test] - fn insert_unsigned_header_finality_with_cache() { - run_test(1, |_| { - assert_ok!(test_benchmark_import_unsigned_finality_with_cache::()); - }); - } - - #[test] - fn insert_unsigned_header_pruning() { - run_test(1, |_| { - assert_ok!(test_benchmark_import_unsigned_pruning::()); - }); - } - - #[test] - fn insert_unsigned_header_receipts() { - run_test(1, |_| { - assert_ok!(test_benchmark_import_unsigned_with_receipts::()); - }); - } -} diff --git a/bridges/modules/ethereum/src/error.rs b/bridges/modules/ethereum/src/error.rs deleted file mode 100644 index 6fd376b01715..000000000000 --- a/bridges/modules/ethereum/src/error.rs +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use sp_runtime::RuntimeDebug; - -/// Header import error. -#[derive(Clone, Copy, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(PartialEq))] -pub enum Error { - /// The header is beyond last finalized and can not be imported. - AncientHeader = 0, - /// The header is already imported. - KnownHeader = 1, - /// Seal has an incorrect format. - InvalidSealArity = 2, - /// Block number isn't sensible. - RidiculousNumber = 3, - /// Block has too much gas used. - TooMuchGasUsed = 4, - /// Gas limit header field is invalid. - InvalidGasLimit = 5, - /// Extra data is of an invalid length. 
- ExtraDataOutOfBounds = 6, - /// Timestamp header overflowed. - TimestampOverflow = 7, - /// The parent header is missing from the blockchain. - MissingParentBlock = 8, - /// The header step is missing from the header. - MissingStep = 9, - /// The header signature is missing from the header. - MissingSignature = 10, - /// Empty steps are missing from the header. - MissingEmptySteps = 11, - /// The same author issued different votes at the same step. - DoubleVote = 12, - /// Validation proof insufficient. - InsufficientProof = 13, - /// Difficulty header field is invalid. - InvalidDifficulty = 14, - /// The received block is from an incorrect proposer. - NotValidator = 15, - /// Missing transaction receipts for the operation. - MissingTransactionsReceipts = 16, - /// Redundant transaction receipts are provided. - RedundantTransactionsReceipts = 17, - /// Provided transactions receipts are not matching the header. - TransactionsReceiptsMismatch = 18, - /// Can't accept unsigned header from the far future. - UnsignedTooFarInTheFuture = 19, - /// Trying to finalize sibling of finalized block. - TryingToFinalizeSibling = 20, - /// Header timestamp is ahead of on-chain timestamp - HeaderTimestampIsAhead = 21, -} - -impl Error { - pub fn msg(&self) -> &'static str { - match *self { - Error::AncientHeader => "Header is beyound last finalized and can not be imported", - Error::KnownHeader => "Header is already imported", - Error::InvalidSealArity => "Header has an incorrect seal", - Error::RidiculousNumber => "Header has too large number", - Error::TooMuchGasUsed => "Header has too much gas used", - Error::InvalidGasLimit => "Header has invalid gas limit", - Error::ExtraDataOutOfBounds => "Header has too large extra data", - Error::TimestampOverflow => "Header has too large timestamp", - Error::MissingParentBlock => "Header has unknown parent hash", - Error::MissingStep => "Header is missing step seal", - Error::MissingSignature => "Header is missing signature seal", - Error::MissingEmptySteps => "Header is missing empty steps seal", - Error::DoubleVote => "Header has invalid step in seal", - Error::InsufficientProof => "Header has insufficient proof", - Error::InvalidDifficulty => "Header has invalid difficulty", - Error::NotValidator => "Header is sealed by unexpected validator", - Error::MissingTransactionsReceipts => - "The import operation requires transactions receipts", - Error::RedundantTransactionsReceipts => "Redundant transactions receipts are provided", - Error::TransactionsReceiptsMismatch => "Invalid transactions receipts provided", - Error::UnsignedTooFarInTheFuture => "The unsigned header is too far in future", - Error::TryingToFinalizeSibling => "Trying to finalize sibling of finalized block", - Error::HeaderTimestampIsAhead => "Header timestamp is ahead of on-chain timestamp", - } - } - - /// Return unique error code. - pub fn code(&self) -> u8 { - *self as u8 - } -} diff --git a/bridges/modules/ethereum/src/finality.rs b/bridges/modules/ethereum/src/finality.rs deleted file mode 100644 index fe8841fcc044..000000000000 --- a/bridges/modules/ethereum/src/finality.rs +++ /dev/null @@ -1,557 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::{error::Error, Storage}; -use bp_eth_poa::{public_to_address, Address, AuraHeader, HeaderId, SealedEmptyStep, H256}; -use codec::{Decode, Encode}; -use scale_info::TypeInfo; -use sp_io::crypto::secp256k1_ecdsa_recover; -use sp_runtime::RuntimeDebug; -use sp_std::{ - collections::{ - btree_map::{BTreeMap, Entry}, - btree_set::BTreeSet, - vec_deque::VecDeque, - }, - prelude::*, -}; - -/// Cached finality votes for given block. -#[derive(RuntimeDebug)] -#[cfg_attr(test, derive(PartialEq))] -pub struct CachedFinalityVotes { - /// True if we have stopped at best finalized block' sibling. This means - /// that we are trying to finalize block from fork that has forked before - /// best finalized. - pub stopped_at_finalized_sibling: bool, - /// Header ancestors that were read while we have been searching for - /// cached votes entry. The newest header has index 0. - pub unaccounted_ancestry: VecDeque<(HeaderId, Option, AuraHeader)>, - /// Cached finality votes, if they have been found. The associated - /// header is not included into `unaccounted_ancestry`. - pub votes: Option>, -} - -/// Finality effects. -#[derive(RuntimeDebug)] -#[cfg_attr(test, derive(PartialEq))] -pub struct FinalityEffects { - /// Finalized headers. - pub finalized_headers: Vec<(HeaderId, Option)>, - /// Finality votes used in computation. - pub votes: FinalityVotes, -} - -/// Finality votes for given block. -#[derive(RuntimeDebug, Decode, Encode, TypeInfo)] -#[cfg_attr(test, derive(Clone, PartialEq))] -pub struct FinalityVotes { - /// Number of votes per each validator. - pub votes: BTreeMap, - /// Ancestry blocks with the oldest ancestors at the beginning and newest at the - /// end of the queue. - pub ancestry: VecDeque>, -} - -/// Information about block ancestor that is used in computations. -#[derive(RuntimeDebug, Decode, Encode, TypeInfo)] -#[cfg_attr(test, derive(Clone, Default, PartialEq))] -pub struct FinalityAncestor { - /// Bock id. - pub id: HeaderId, - /// Block submitter. - pub submitter: Option, - /// Validators that have signed this block and empty steps on top - /// of this block. - pub signers: BTreeSet
, -} - -/// Tries to finalize blocks when given block is imported. -/// -/// Returns numbers and hashes of finalized blocks in ascending order. -pub fn finalize_blocks( - storage: &S, - best_finalized: HeaderId, - header_validators: (HeaderId, &[Address]), - id: HeaderId, - submitter: Option<&S::Submitter>, - header: &AuraHeader, - two_thirds_majority_transition: u64, -) -> Result, Error> { - // compute count of voters for every unfinalized block in ancestry - let validators = header_validators.1.iter().collect(); - let votes = prepare_votes( - header - .parent_id() - .map(|parent_id| { - storage.cached_finality_votes(&parent_id, &best_finalized, |hash| { - *hash == header_validators.0.hash || *hash == best_finalized.hash - }) - }) - .unwrap_or_default(), - best_finalized, - &validators, - id, - header, - submitter.cloned(), - )?; - - // now let's iterate in reverse order && find just finalized blocks - let mut finalized_headers = Vec::new(); - let mut current_votes = votes.votes.clone(); - for ancestor in &votes.ancestry { - if !is_finalized( - &validators, - ¤t_votes, - ancestor.id.number >= two_thirds_majority_transition, - ) { - break - } - - remove_signers_votes(&ancestor.signers, &mut current_votes); - finalized_headers.push((ancestor.id, ancestor.submitter.clone())); - } - - Ok(FinalityEffects { finalized_headers, votes }) -} - -/// Returns true if there are enough votes to treat this header as finalized. -fn is_finalized( - validators: &BTreeSet<&Address>, - votes: &BTreeMap, - requires_two_thirds_majority: bool, -) -> bool { - (!requires_two_thirds_majority && votes.len() * 2 > validators.len()) || - (requires_two_thirds_majority && votes.len() * 3 > validators.len() * 2) -} - -/// Prepare 'votes' of header and its ancestors' signers. -pub(crate) fn prepare_votes( - mut cached_votes: CachedFinalityVotes, - best_finalized: HeaderId, - validators: &BTreeSet<&Address>, - id: HeaderId, - header: &AuraHeader, - submitter: Option, -) -> Result, Error> { - // if we have reached finalized block sibling, then we're trying - // to switch finalized blocks - if cached_votes.stopped_at_finalized_sibling { - return Err(Error::TryingToFinalizeSibling) - } - - // this fn can only work with single validators set - if !validators.contains(&header.author) { - return Err(Error::NotValidator) - } - - // now we have votes that were valid when some block B has been inserted - // things may have changed a bit, but we do not need to read anything else - // from the db, because we have ancestry - // so the only thing we need to do is: - // 1) remove votes from blocks that have been finalized after B has been inserted; - // 2) add votes from B descendants - let mut votes = cached_votes.votes.unwrap_or_default(); - - // remove votes from finalized blocks - while let Some(old_ancestor) = votes.ancestry.pop_front() { - if old_ancestor.id.number > best_finalized.number { - votes.ancestry.push_front(old_ancestor); - break - } - - remove_signers_votes(&old_ancestor.signers, &mut votes.votes); - } - - // add votes from new blocks - let mut parent_empty_step_signers = empty_steps_signers(header); - let mut unaccounted_ancestry = VecDeque::new(); - while let Some((ancestor_id, ancestor_submitter, ancestor)) = - cached_votes.unaccounted_ancestry.pop_front() - { - let mut signers = empty_steps_signers(&ancestor); - sp_std::mem::swap(&mut signers, &mut parent_empty_step_signers); - signers.insert(ancestor.author); - - add_signers_votes(validators, &signers, &mut votes.votes)?; - - 
unaccounted_ancestry.push_front(FinalityAncestor { - id: ancestor_id, - submitter: ancestor_submitter, - signers, - }); - } - votes.ancestry.extend(unaccounted_ancestry); - - // add votes from block itself - let mut header_signers = BTreeSet::new(); - header_signers.insert(header.author); - *votes.votes.entry(header.author).or_insert(0) += 1; - votes - .ancestry - .push_back(FinalityAncestor { id, submitter, signers: header_signers }); - - Ok(votes) -} - -/// Increase count of 'votes' for every passed signer. -/// Fails if at least one of signers is not in the `validators` set. -fn add_signers_votes( - validators: &BTreeSet<&Address>, - signers_to_add: &BTreeSet
, - votes: &mut BTreeMap, -) -> Result<(), Error> { - for signer in signers_to_add { - if !validators.contains(signer) { - return Err(Error::NotValidator) - } - - *votes.entry(*signer).or_insert(0) += 1; - } - - Ok(()) -} - -/// Decrease 'votes' count for every passed signer. -fn remove_signers_votes(signers_to_remove: &BTreeSet
, votes: &mut BTreeMap) { - for signer in signers_to_remove { - match votes.entry(*signer) { - Entry::Occupied(mut entry) => - if *entry.get() <= 1 { - entry.remove(); - } else { - *entry.get_mut() -= 1; - }, - Entry::Vacant(_) => unreachable!("we only remove signers that have been added; qed"), - } - } -} - -/// Returns unique set of empty steps signers. -fn empty_steps_signers(header: &AuraHeader) -> BTreeSet
{ - header - .empty_steps() - .into_iter() - .flatten() - .filter_map(|step| empty_step_signer(&step, &header.parent_hash)) - .collect::>() -} - -/// Returns author of empty step signature. -fn empty_step_signer(empty_step: &SealedEmptyStep, parent_hash: &H256) -> Option
{ - let message = empty_step.message(parent_hash); - secp256k1_ecdsa_recover(empty_step.signature.as_fixed_bytes(), message.as_fixed_bytes()) - .ok() - .map(|public| public_to_address(&public)) -} - -impl Default for CachedFinalityVotes { - fn default() -> Self { - CachedFinalityVotes { - stopped_at_finalized_sibling: false, - unaccounted_ancestry: VecDeque::new(), - votes: None, - } - } -} - -impl Default for FinalityVotes { - fn default() -> Self { - FinalityVotes { votes: BTreeMap::new(), ancestry: VecDeque::new() } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - mock::{ - insert_header, run_test, validator, validators_addresses, HeaderBuilder, TestRuntime, - }, - BridgeStorage, FinalityCache, HeaderToImport, - }; - - const TOTAL_VALIDATORS: usize = 5; - - #[test] - fn verifies_header_author() { - run_test(TOTAL_VALIDATORS, |_| { - assert_eq!( - finalize_blocks( - &BridgeStorage::::new(), - Default::default(), - (Default::default(), &[]), - Default::default(), - None, - &AuraHeader::default(), - 0, - ), - Err(Error::NotValidator), - ); - }); - } - - #[test] - fn finalize_blocks_works() { - run_test(TOTAL_VALIDATORS, |ctx| { - // let's say we have 5 validators (we need 'votes' from 3 validators to achieve - // finality) - let mut storage = BridgeStorage::::new(); - - // when header#1 is inserted, nothing is finalized (1 vote) - let header1 = HeaderBuilder::with_parent(&ctx.genesis).sign_by(&validator(0)); - let id1 = header1.compute_id(); - let mut header_to_import = HeaderToImport { - context: storage.import_context(None, &header1.parent_hash).unwrap(), - is_best: true, - id: id1, - header: header1, - total_difficulty: 0.into(), - enacted_change: None, - scheduled_change: None, - finality_votes: Default::default(), - }; - assert_eq!( - finalize_blocks( - &storage, - ctx.genesis.compute_id(), - (Default::default(), &ctx.addresses), - id1, - None, - &header_to_import.header, - u64::max_value(), - ) - .map(|eff| eff.finalized_headers), - Ok(Vec::new()), - ); - storage.insert_header(header_to_import.clone()); - - // when header#2 is inserted, nothing is finalized (2 votes) - header_to_import.header = - HeaderBuilder::with_parent_hash(id1.hash).sign_by(&validator(1)); - header_to_import.id = header_to_import.header.compute_id(); - let id2 = header_to_import.header.compute_id(); - assert_eq!( - finalize_blocks( - &storage, - ctx.genesis.compute_id(), - (Default::default(), &ctx.addresses), - id2, - None, - &header_to_import.header, - u64::max_value(), - ) - .map(|eff| eff.finalized_headers), - Ok(Vec::new()), - ); - storage.insert_header(header_to_import.clone()); - - // when header#3 is inserted, header#1 is finalized (3 votes) - header_to_import.header = - HeaderBuilder::with_parent_hash(id2.hash).sign_by(&validator(2)); - header_to_import.id = header_to_import.header.compute_id(); - let id3 = header_to_import.header.compute_id(); - assert_eq!( - finalize_blocks( - &storage, - ctx.genesis.compute_id(), - (Default::default(), &ctx.addresses), - id3, - None, - &header_to_import.header, - u64::max_value(), - ) - .map(|eff| eff.finalized_headers), - Ok(vec![(id1, None)]), - ); - storage.insert_header(header_to_import); - }); - } - - #[test] - fn cached_votes_are_updated_with_ancestry() { - // we're inserting header#5 - // cached votes are from header#3 - // header#4 has finalized header#1 and header#2 - // => when inserting header#5, we need to: - // 1) remove votes from header#1 and header#2 - // 2) add votes from header#4 and header#5 - let validators = 
validators_addresses(5); - let headers = (1..6) - .map(|number| { - HeaderBuilder::with_number(number).sign_by(&validator(number as usize - 1)) - }) - .collect::>(); - let ancestry = headers - .iter() - .map(|header| FinalityAncestor { - id: header.compute_id(), - signers: vec![header.author].into_iter().collect(), - ..Default::default() - }) - .collect::>(); - let header5 = headers[4].clone(); - assert_eq!( - prepare_votes::<()>( - CachedFinalityVotes { - stopped_at_finalized_sibling: false, - unaccounted_ancestry: - vec![(headers[3].compute_id(), None, headers[3].clone()),] - .into_iter() - .collect(), - votes: Some(FinalityVotes { - votes: vec![(validators[0], 1), (validators[1], 1), (validators[2], 1),] - .into_iter() - .collect(), - ancestry: ancestry[..3].iter().cloned().collect(), - }), - }, - headers[1].compute_id(), - &validators.iter().collect(), - header5.compute_id(), - &header5, - None, - ) - .unwrap(), - FinalityVotes { - votes: vec![(validators[2], 1), (validators[3], 1), (validators[4], 1),] - .into_iter() - .collect(), - ancestry: ancestry[2..].iter().cloned().collect(), - }, - ); - } - - #[test] - fn prepare_votes_respects_finality_cache() { - run_test(TOTAL_VALIDATORS, |ctx| { - // we need signatures of 3 validators to finalize block - let mut storage = BridgeStorage::::new(); - - // headers 1..3 are signed by validator#0 - // headers 4..6 are signed by validator#1 - // headers 7..9 are signed by validator#2 - let mut hashes = Vec::new(); - let mut headers = Vec::new(); - let mut ancestry = Vec::new(); - let mut parent_hash = ctx.genesis.compute_hash(); - for i in 1..10 { - let header = - HeaderBuilder::with_parent_hash(parent_hash).sign_by(&validator((i - 1) / 3)); - let id = header.compute_id(); - insert_header(&mut storage, header.clone()); - hashes.push(id.hash); - ancestry.push(FinalityAncestor { - id: header.compute_id(), - submitter: None, - signers: vec![header.author].into_iter().collect(), - }); - headers.push(header); - parent_hash = id.hash; - } - - // when we're inserting header#7 and last finalized header is 0: - // check that votes at #7 are computed correctly without cache - let expected_votes_at_7 = FinalityVotes { - votes: vec![(ctx.addresses[0], 3), (ctx.addresses[1], 3), (ctx.addresses[2], 1)] - .into_iter() - .collect(), - ancestry: ancestry[..7].iter().cloned().collect(), - }; - let id7 = headers[6].compute_id(); - assert_eq!( - prepare_votes( - storage.cached_finality_votes( - &headers.get(5).unwrap().compute_id(), - &ctx.genesis.compute_id(), - |_| false, - ), - Default::default(), - &ctx.addresses.iter().collect(), - id7, - headers.get(6).unwrap(), - None, - ) - .unwrap(), - expected_votes_at_7, - ); - - // cached votes at #5 - let expected_votes_at_5 = FinalityVotes { - votes: vec![(ctx.addresses[0], 3), (ctx.addresses[1], 2)].into_iter().collect(), - ancestry: ancestry[..5].iter().cloned().collect(), - }; - FinalityCache::::insert(hashes[4], expected_votes_at_5); - - // when we're inserting header#7 and last finalized header is 0: - // check that votes at #7 are computed correctly with cache - assert_eq!( - prepare_votes( - storage.cached_finality_votes( - &headers.get(5).unwrap().compute_id(), - &ctx.genesis.compute_id(), - |_| false, - ), - Default::default(), - &ctx.addresses.iter().collect(), - id7, - headers.get(6).unwrap(), - None, - ) - .unwrap(), - expected_votes_at_7, - ); - - // when we're inserting header#7 and last finalized header is 3: - // check that votes at #7 are computed correctly with cache - let expected_votes_at_7 = 
FinalityVotes { - votes: vec![(ctx.addresses[1], 3), (ctx.addresses[2], 1)].into_iter().collect(), - ancestry: ancestry[3..7].iter().cloned().collect(), - }; - assert_eq!( - prepare_votes( - storage.cached_finality_votes( - &headers.get(5).unwrap().compute_id(), - &headers.get(2).unwrap().compute_id(), - |hash| *hash == hashes[2], - ), - headers[2].compute_id(), - &ctx.addresses.iter().collect(), - id7, - headers.get(6).unwrap(), - None, - ) - .unwrap(), - expected_votes_at_7, - ); - }); - } - - #[test] - fn prepare_votes_fails_when_finalized_sibling_is_in_ancestry() { - assert_eq!( - prepare_votes::<()>( - CachedFinalityVotes { stopped_at_finalized_sibling: true, ..Default::default() }, - Default::default(), - &validators_addresses(3).iter().collect(), - Default::default(), - &Default::default(), - None, - ), - Err(Error::TryingToFinalizeSibling), - ); - } -} diff --git a/bridges/modules/ethereum/src/import.rs b/bridges/modules/ethereum/src/import.rs deleted file mode 100644 index 377743245777..000000000000 --- a/bridges/modules/ethereum/src/import.rs +++ /dev/null @@ -1,600 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::{ - error::Error, - finality::finalize_blocks, - validators::{Validators, ValidatorsConfiguration}, - verification::{is_importable_header, verify_aura_header}, - AuraConfiguration, ChainTime, ChangeToEnact, PruningStrategy, Storage, -}; -use bp_eth_poa::{AuraHeader, HeaderId, Receipt}; -use sp_std::{collections::btree_map::BTreeMap, prelude::*}; - -/// Imports a bunch of headers and updates blocks finality. -/// -/// Transactions receipts must be provided if `header_import_requires_receipts()` -/// has returned true. -/// If successful, returns tuple where first element is the number of useful headers -/// we have imported and the second element is the number of useless headers (duplicate) -/// we have NOT imported. -/// Returns error if fatal error has occurred during import. Some valid headers may be -/// imported in this case. 
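The useful/useless accounting described above can be summarised with a minimal standalone sketch. Everything in it is illustrative only (the error variants, the `u32` submitter id and the `import_many` helper are not part of the pallet); the real `import_headers` additionally threads storage, the pruning strategy, validator configuration and chain time through the loop:

    use std::collections::BTreeMap;

    #[derive(Debug, PartialEq)]
    enum ImportError { AncientHeader, KnownHeader, Fatal }

    /// Result of importing one header: submitters of the headers it finalized
    /// (`None` for headers that arrived via unsigned transactions).
    type SingleImport = Result<Vec<Option<u32>>, ImportError>;

    fn import_many(
        outcomes: Vec<SingleImport>,
    ) -> Result<(u64, u64, BTreeMap<u32, u64>), ImportError> {
        let (mut useful, mut useless) = (0u64, 0u64);
        let mut finalized_by_submitter: BTreeMap<u32, u64> = BTreeMap::new();
        for outcome in outcomes {
            match outcome {
                Ok(finalized) => {
                    // credit every known submitter of a newly finalized header
                    for submitter in finalized.into_iter().flatten() {
                        *finalized_by_submitter.entry(submitter).or_default() += 1;
                    }
                    useful += 1;
                },
                // duplicate and too-old headers are merely counted
                Err(ImportError::AncientHeader) | Err(ImportError::KnownHeader) => useless += 1,
                // anything else is fatal: abort, keeping already imported headers
                Err(fatal) => return Err(fatal),
            }
        }
        Ok((useful, useless, finalized_by_submitter))
    }

In this simplified form it is easy to see the caveat above: a fatal error stops the batch, but the headers imported before it stay imported.
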
-/// TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/415) -#[allow(clippy::too_many_arguments)] -pub fn import_headers( - storage: &mut S, - pruning_strategy: &mut PS, - aura_config: &AuraConfiguration, - validators_config: &ValidatorsConfiguration, - submitter: Option, - headers: Vec<(AuraHeader, Option>)>, - chain_time: &CT, - finalized_headers: &mut BTreeMap, -) -> Result<(u64, u64), Error> { - let mut useful = 0; - let mut useless = 0; - for (header, receipts) in headers { - let import_result = import_header( - storage, - pruning_strategy, - aura_config, - validators_config, - submitter.clone(), - header, - chain_time, - receipts, - ); - - match import_result { - Ok((_, finalized)) => { - for (_, submitter) in finalized { - if let Some(submitter) = submitter { - *finalized_headers.entry(submitter).or_default() += 1; - } - } - useful += 1; - }, - Err(Error::AncientHeader) | Err(Error::KnownHeader) => useless += 1, - Err(error) => return Err(error), - } - } - - Ok((useful, useless)) -} - -/// A vector of finalized headers and their submitters. -pub type FinalizedHeaders = Vec<(HeaderId, Option<::Submitter>)>; - -/// Imports given header and updates blocks finality (if required). -/// -/// Transactions receipts must be provided if `header_import_requires_receipts()` -/// has returned true. -/// -/// Returns imported block id and list of all finalized headers. -/// TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/415) -#[allow(clippy::too_many_arguments)] -pub fn import_header( - storage: &mut S, - pruning_strategy: &mut PS, - aura_config: &AuraConfiguration, - validators_config: &ValidatorsConfiguration, - submitter: Option, - header: AuraHeader, - chain_time: &CT, - receipts: Option>, -) -> Result<(HeaderId, FinalizedHeaders), Error> { - // first check that we are able to import this header at all - let (header_id, finalized_id) = is_importable_header(storage, &header)?; - - // verify header - let import_context = verify_aura_header(storage, aura_config, submitter, &header, chain_time)?; - - // check if block schedules new validators - let validators = Validators::new(validators_config); - let (scheduled_change, enacted_change) = - validators.extract_validators_change(&header, receipts)?; - - // check if block finalizes some other blocks and corresponding scheduled validators - let validators_set = import_context.validators_set(); - let finalized_blocks = finalize_blocks( - storage, - finalized_id, - (validators_set.enact_block, &validators_set.validators), - header_id, - import_context.submitter(), - &header, - aura_config.two_thirds_majority_transition, - )?; - let enacted_change = enacted_change - .map(|validators| ChangeToEnact { signal_block: None, validators }) - .or_else(|| { - validators.finalize_validators_change(storage, &finalized_blocks.finalized_headers) - }); - - // NOTE: we can't return Err() from anywhere below this line - // (because otherwise we'll have inconsistent storage if transaction will fail) - - // and finally insert the block - let (best_id, best_total_difficulty) = storage.best_block(); - let total_difficulty = import_context.total_difficulty() + header.difficulty; - let is_best = total_difficulty > best_total_difficulty; - storage.insert_header(import_context.into_import_header( - is_best, - header_id, - header, - total_difficulty, - enacted_change, - scheduled_change, - finalized_blocks.votes, - )); - - // compute upper border of updated pruning range - let new_best_block_id = if is_best { 
header_id } else { best_id }; - let new_best_finalized_block_id = finalized_blocks.finalized_headers.last().map(|(id, _)| *id); - let pruning_upper_bound = pruning_strategy.pruning_upper_bound( - new_best_block_id.number, - new_best_finalized_block_id.map(|id| id.number).unwrap_or(finalized_id.number), - ); - - // now mark finalized headers && prune old headers - storage.finalize_and_prune_headers(new_best_finalized_block_id, pruning_upper_bound); - - Ok((header_id, finalized_blocks.finalized_headers)) -} - -/// Returns true if transactions receipts are required to import given header. -pub fn header_import_requires_receipts( - storage: &S, - validators_config: &ValidatorsConfiguration, - header: &AuraHeader, -) -> bool { - is_importable_header(storage, header) - .map(|_| Validators::new(validators_config)) - .map(|validators| validators.maybe_signals_validators_change(header)) - .unwrap_or(false) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - mock::{ - run_test, secret_to_address, test_aura_config, test_validators_config, validator, - validators_addresses, validators_change_receipt, HeaderBuilder, - KeepSomeHeadersBehindBest, TestRuntime, GAS_LIMIT, - }, - validators::ValidatorsSource, - BlocksToPrune, BridgeStorage, Headers, PruningRange, - }; - use libsecp256k1::SecretKey; - - const TOTAL_VALIDATORS: usize = 3; - - #[test] - fn rejects_finalized_block_competitors() { - run_test(TOTAL_VALIDATORS, |_| { - let mut storage = BridgeStorage::::new(); - storage.finalize_and_prune_headers( - Some(HeaderId { number: 100, ..Default::default() }), - 0, - ); - assert_eq!( - import_header( - &mut storage, - &mut KeepSomeHeadersBehindBest::default(), - &test_aura_config(), - &test_validators_config(), - None, - Default::default(), - &(), - None, - ), - Err(Error::AncientHeader), - ); - }); - } - - #[test] - fn rejects_known_header() { - run_test(TOTAL_VALIDATORS, |ctx| { - let mut storage = BridgeStorage::::new(); - let header = HeaderBuilder::with_parent(&ctx.genesis).sign_by(&validator(1)); - assert_eq!( - import_header( - &mut storage, - &mut KeepSomeHeadersBehindBest::default(), - &test_aura_config(), - &test_validators_config(), - None, - header.clone(), - &(), - None, - ) - .map(|_| ()), - Ok(()), - ); - assert_eq!( - import_header( - &mut storage, - &mut KeepSomeHeadersBehindBest::default(), - &test_aura_config(), - &test_validators_config(), - None, - header, - &(), - None, - ) - .map(|_| ()), - Err(Error::KnownHeader), - ); - }); - } - - #[test] - fn import_header_works() { - run_test(TOTAL_VALIDATORS, |ctx| { - let validators_config = ValidatorsConfiguration::Multi(vec![ - (0, ValidatorsSource::List(ctx.addresses.clone())), - (1, ValidatorsSource::List(validators_addresses(2))), - ]); - let mut storage = BridgeStorage::::new(); - let header = HeaderBuilder::with_parent(&ctx.genesis).sign_by(&validator(1)); - let hash = header.compute_hash(); - assert_eq!( - import_header( - &mut storage, - &mut KeepSomeHeadersBehindBest::default(), - &test_aura_config(), - &validators_config, - None, - header, - &(), - None - ) - .map(|_| ()), - Ok(()), - ); - - // check that new validators will be used for next header - let imported_header = Headers::::get(&hash).unwrap(); - assert_eq!( - imported_header.next_validators_set_id, - 1, // new set is enacted from config - ); - }); - } - - #[test] - fn headers_are_pruned_during_import() { - run_test(TOTAL_VALIDATORS, |ctx| { - let validators_config = ValidatorsConfiguration::Single(ValidatorsSource::Contract( - [3; 20].into(), - 
ctx.addresses.clone(), - )); - let validators = vec![validator(0), validator(1), validator(2)]; - let mut storage = BridgeStorage::::new(); - - // header [0..11] are finalizing blocks [0; 9] - // => since we want to keep 10 finalized blocks, we aren't pruning anything - let mut latest_block_id = Default::default(); - for i in 1..11 { - let header = HeaderBuilder::with_parent_number(i - 1).sign_by_set(&validators); - let parent_id = header.parent_id().unwrap(); - - let (rolling_last_block_id, finalized_blocks) = import_header( - &mut storage, - &mut KeepSomeHeadersBehindBest::default(), - &test_aura_config(), - &validators_config, - Some(100), - header, - &(), - None, - ) - .unwrap(); - match i { - 2..=10 => { - assert_eq!(finalized_blocks, vec![(parent_id, Some(100))], "At {}", i,) - }, - _ => assert_eq!(finalized_blocks, vec![], "At {}", i), - } - latest_block_id = rolling_last_block_id; - } - assert!(storage.header(&ctx.genesis.compute_hash()).is_some()); - - // header 11 finalizes headers [10] AND schedules change - // => we prune header#0 - let header11 = HeaderBuilder::with_parent_number(10) - .log_bloom((&[0xff; 256]).into()) - .receipts_root( - "ead6c772ba0083bbff497ba0f4efe47c199a2655401096c21ab7450b6c466d97" - .parse() - .unwrap(), - ) - .sign_by_set(&validators); - let parent_id = header11.parent_id().unwrap(); - let (rolling_last_block_id, finalized_blocks) = import_header( - &mut storage, - &mut KeepSomeHeadersBehindBest::default(), - &test_aura_config(), - &validators_config, - Some(101), - header11.clone(), - &(), - Some(vec![validators_change_receipt(latest_block_id.hash)]), - ) - .unwrap(); - assert_eq!(finalized_blocks, vec![(parent_id, Some(100))],); - assert!(storage.header(&ctx.genesis.compute_hash()).is_none()); - latest_block_id = rolling_last_block_id; - - // and now let's say validators 1 && 2 went offline - // => in the range 12-25 no blocks are finalized, but we still continue to prune old - // headers until header#11 is met. 
we can't prune #11, because it schedules change - let mut step = 56u64; - let mut expected_blocks = vec![(header11.compute_id(), Some(101))]; - for i in 12..25 { - let header = HeaderBuilder::with_parent_hash(latest_block_id.hash) - .difficulty(i.into()) - .step(step) - .sign_by_set(&validators); - expected_blocks.push((header.compute_id(), Some(102))); - let (rolling_last_block_id, finalized_blocks) = import_header( - &mut storage, - &mut KeepSomeHeadersBehindBest::default(), - &test_aura_config(), - &validators_config, - Some(102), - header, - &(), - None, - ) - .unwrap(); - assert_eq!(finalized_blocks, vec![],); - latest_block_id = rolling_last_block_id; - step += 3; - } - assert_eq!( - BlocksToPrune::::get(), - PruningRange { oldest_unpruned_block: 11, oldest_block_to_keep: 14 }, - ); - - // now let's insert block signed by validator 1 - // => blocks 11..24 are finalized and blocks 11..14 are pruned - step -= 2; - let header = HeaderBuilder::with_parent_hash(latest_block_id.hash) - .difficulty(25.into()) - .step(step) - .sign_by_set(&validators); - let (_, finalized_blocks) = import_header( - &mut storage, - &mut KeepSomeHeadersBehindBest::default(), - &test_aura_config(), - &validators_config, - Some(103), - header, - &(), - None, - ) - .unwrap(); - assert_eq!(finalized_blocks, expected_blocks); - assert_eq!( - BlocksToPrune::::get(), - PruningRange { oldest_unpruned_block: 15, oldest_block_to_keep: 15 }, - ); - }); - } - - fn import_custom_block( - storage: &mut S, - validators: &[SecretKey], - header: AuraHeader, - ) -> Result { - let id = header.compute_id(); - import_header( - storage, - &mut KeepSomeHeadersBehindBest::default(), - &test_aura_config(), - &ValidatorsConfiguration::Single(ValidatorsSource::Contract( - [0; 20].into(), - validators.iter().map(secret_to_address).collect(), - )), - None, - header, - &(), - None, - ) - .map(|_| id) - } - - #[test] - fn import_of_non_best_block_may_finalize_blocks() { - run_test(TOTAL_VALIDATORS, |ctx| { - let mut storage = BridgeStorage::::new(); - - // insert headers (H1, validator1), (H2, validator1), (H3, validator1) - // making H3 the best header, without finalizing anything (we need 2 signatures) - let mut expected_best_block = Default::default(); - for i in 1..4 { - let step = 1 + i * TOTAL_VALIDATORS as u64; - expected_best_block = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_number(i - 1) - .step(step) - .sign_by_set(&ctx.validators), - ) - .unwrap(); - } - let (best_block, best_difficulty) = storage.best_block(); - assert_eq!(best_block, expected_best_block); - assert_eq!(storage.finalized_block(), ctx.genesis.compute_id()); - - // insert headers (H1', validator1), (H2', validator2), finalizing H2, even though H3 - // has better difficulty than H2' (because there are more steps involved) - let mut expected_finalized_block = Default::default(); - let mut parent_hash = ctx.genesis.compute_hash(); - for i in 1..3 { - let step = i; - let id = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_hash(parent_hash) - .step(step) - .gas_limit((GAS_LIMIT + 1).into()) - .sign_by_set(&ctx.validators), - ) - .unwrap(); - parent_hash = id.hash; - if i == 1 { - expected_finalized_block = id; - } - } - let (new_best_block, new_best_difficulty) = storage.best_block(); - assert_eq!(new_best_block, expected_best_block); - assert_eq!(new_best_difficulty, best_difficulty); - assert_eq!(storage.finalized_block(), expected_finalized_block); - }); - } - - #[test] - fn 
append_to_unfinalized_fork_fails() { - const VALIDATORS: u64 = 5; - run_test(VALIDATORS as usize, |ctx| { - let mut storage = BridgeStorage::::new(); - - // header1, authored by validator[2] is best common block between two competing forks - let header1 = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_number(0).step(2).sign_by_set(&ctx.validators), - ) - .unwrap(); - assert_eq!(storage.best_block().0, header1); - assert_eq!(storage.finalized_block().number, 0); - - // validator[3] has authored header2 (nothing is finalized yet) - let header2 = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_number(1).step(3).sign_by_set(&ctx.validators), - ) - .unwrap(); - assert_eq!(storage.best_block().0, header2); - assert_eq!(storage.finalized_block().number, 0); - - // validator[4] has authored header3 (header1 is finalized) - let header3 = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_number(2).step(4).sign_by_set(&ctx.validators), - ) - .unwrap(); - assert_eq!(storage.best_block().0, header3); - assert_eq!(storage.finalized_block(), header1); - - // validator[4] has authored 4 blocks: header2'...header5' (header1 is still finalized) - let header2_1 = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_number(1) - .gas_limit((GAS_LIMIT + 1).into()) - .step(4) - .sign_by_set(&ctx.validators), - ) - .unwrap(); - let header3_1 = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_hash(header2_1.hash) - .step(4 + VALIDATORS) - .sign_by_set(&ctx.validators), - ) - .unwrap(); - let header4_1 = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_hash(header3_1.hash) - .step(4 + VALIDATORS * 2) - .sign_by_set(&ctx.validators), - ) - .unwrap(); - let header5_1 = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_hash(header4_1.hash) - .step(4 + VALIDATORS * 3) - .sign_by_set(&ctx.validators), - ) - .unwrap(); - assert_eq!(storage.best_block().0, header5_1); - assert_eq!(storage.finalized_block(), header1); - - // when we import header4 { parent = header3 }, authored by validator[0], header2 is - // finalized - let header4 = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_number(3).step(5).sign_by_set(&ctx.validators), - ) - .unwrap(); - assert_eq!(storage.best_block().0, header5_1); - assert_eq!(storage.finalized_block(), header2); - - // when we import header5 { parent = header4 }, authored by validator[1], header3 is - // finalized - let header5 = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_hash(header4.hash) - .step(6) - .sign_by_set(&ctx.validators), - ) - .unwrap(); - assert_eq!(storage.best_block().0, header5); - assert_eq!(storage.finalized_block(), header3); - - // import of header2'' { parent = header1 } fails, because it has number < - // best_finalized - assert_eq!( - import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_number(1) - .gas_limit((GAS_LIMIT + 1).into()) - .step(3) - .sign_by_set(&ctx.validators) - ), - Err(Error::AncientHeader), - ); - - // import of header6' should also fail because we're trying to append to fork thas - // has forked before finalized block - assert_eq!( - import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_number(5) - .gas_limit((GAS_LIMIT + 1).into()) - .step(5 + VALIDATORS * 4) - 
.sign_by_set(&ctx.validators), - ), - Err(Error::TryingToFinalizeSibling), - ); - }); - } -} diff --git a/bridges/modules/ethereum/src/lib.rs b/bridges/modules/ethereum/src/lib.rs deleted file mode 100644 index 4224818ad96c..000000000000 --- a/bridges/modules/ethereum/src/lib.rs +++ /dev/null @@ -1,1572 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -#![cfg_attr(not(feature = "std"), no_std)] -// Runtime-generated enums -#![allow(clippy::large_enum_variant)] - -use crate::finality::{CachedFinalityVotes, FinalityVotes}; -use bp_eth_poa::{ - Address, AuraHeader, HeaderId, RawTransaction, RawTransactionReceipt, Receipt, H256, U256, -}; -use codec::{Decode, Encode}; -use frame_support::traits::Get; -use scale_info::TypeInfo; -use sp_runtime::RuntimeDebug; -use sp_std::{boxed::Box, cmp::Ord, collections::btree_map::BTreeMap, prelude::*}; - -pub use validators::{ValidatorsConfiguration, ValidatorsSource}; - -mod error; -mod finality; -mod import; -mod validators; -mod verification; - -#[cfg(feature = "runtime-benchmarks")] -mod benchmarking; - -#[cfg(test)] -mod mock; - -#[cfg(any(feature = "runtime-benchmarks", test))] -pub mod test_utils; - -/// Maximal number of blocks we're pruning in single import call. -const MAX_BLOCKS_TO_PRUNE_IN_SINGLE_IMPORT: u64 = 8; - -/// Authority round engine configuration parameters. -#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug)] -pub struct AuraConfiguration { - /// Empty step messages transition block. - pub empty_steps_transition: u64, - /// Transition block to strict empty steps validation. - pub strict_empty_steps_transition: u64, - /// Monotonic step validation transition block. - pub validate_step_transition: u64, - /// Chain score validation transition block. - pub validate_score_transition: u64, - /// First block for which a 2/3 quorum (instead of 1/2) is required. - pub two_thirds_majority_transition: u64, - /// Minimum gas limit. - pub min_gas_limit: U256, - /// Maximum gas limit. - pub max_gas_limit: U256, - /// Maximum size of extra data. - pub maximum_extra_data_size: u64, -} - -/// Transaction pool configuration. -/// -/// This is used to limit number of unsigned headers transactions in -/// the pool. We never use it to verify signed transactions. -pub struct PoolConfiguration { - /// Maximal difference between number of header from unsigned transaction - /// and current best block. This must be selected with caution - the more - /// is the difference, the more (potentially invalid) transactions could be - /// accepted to the pool and mined later (filling blocks with spam). - pub max_future_number_difference: u64, -} - -/// Block header as it is stored in the runtime storage. -#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] -pub struct StoredHeader { - /// Submitter of this header. 
May be `None` if header has been submitted - /// using unsigned transaction. - pub submitter: Option, - /// The block header itself. - pub header: AuraHeader, - /// Total difficulty of the chain. - pub total_difficulty: U256, - /// The ID of set of validators that is expected to produce direct descendants of - /// this block. If header enacts new set, this would be the new set. Otherwise - /// this is the set that has produced the block itself. - /// The hash is the hash of block where validators set has been enacted. - pub next_validators_set_id: u64, - /// Hash of the last block which has **SCHEDULED** validators set change. - /// Note that signal doesn't mean that the set has been (or ever will be) enacted. - /// Note that the header may already be pruned. - pub last_signal_block: Option, -} - -/// Validators set as it is stored in the runtime storage. -#[derive(Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] -#[cfg_attr(test, derive(Clone))] -pub struct ValidatorsSet { - /// Validators of this set. - pub validators: Vec
, - /// Hash of the block where this set has been signaled. None if this is the first set. - pub signal_block: Option, - /// Hash of the block where this set has been enacted. - pub enact_block: HeaderId, -} - -/// Validators set change as it is stored in the runtime storage. -#[derive(Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] -#[cfg_attr(test, derive(Clone))] -pub struct AuraScheduledChange { - /// Validators of this set. - pub validators: Vec
, - /// Hash of the block which has emitted previous validators change signal. - pub prev_signal_block: Option, -} - -/// Header that we're importing. -#[derive(RuntimeDebug)] -#[cfg_attr(test, derive(Clone, PartialEq))] -pub struct HeaderToImport { - /// Header import context, - pub context: ImportContext, - /// Should we consider this header as best? - pub is_best: bool, - /// The id of the header. - pub id: HeaderId, - /// The header itself. - pub header: AuraHeader, - /// Total chain difficulty at the header. - pub total_difficulty: U256, - /// New validators set and the hash of block where it has been scheduled (if applicable). - /// Some if set is is enacted by this header. - pub enacted_change: Option, - /// Validators set scheduled change, if happened at the header. - pub scheduled_change: Option>, - /// Finality votes at this header. - pub finality_votes: FinalityVotes, -} - -/// Header that we're importing. -#[derive(RuntimeDebug)] -#[cfg_attr(test, derive(Clone, PartialEq))] -pub struct ChangeToEnact { - /// The id of the header where change has been scheduled. - /// None if it is a first set within current `ValidatorsSource`. - pub signal_block: Option, - /// Validators set that is enacted. - pub validators: Vec
, -} - -/// Blocks range that we want to prune. -#[derive(Encode, Decode, Default, RuntimeDebug, Clone, PartialEq, TypeInfo)] -struct PruningRange { - /// Number of the oldest unpruned block(s). This might be the block that we do not - /// want to prune now (then it is equal to `oldest_block_to_keep`), or block that we - /// were unable to prune for whatever reason (i.e. if it isn't finalized yet and has - /// scheduled validators set change). - pub oldest_unpruned_block: u64, - /// Number of the oldest block(s) that we want to keep. We want to prune blocks in range - /// [ `oldest_unpruned_block`; `oldest_block_to_keep` ). - pub oldest_block_to_keep: u64, -} - -/// Header import context. -/// -/// The import context contains information needed by the header verification -/// pipeline which is not directly part of the header being imported. This includes -/// information relating to its parent, and the current validator set (which -/// provide _context_ for the current header). -#[derive(RuntimeDebug)] -#[cfg_attr(test, derive(Clone, PartialEq))] -pub struct ImportContext { - submitter: Option, - parent_hash: H256, - parent_header: AuraHeader, - parent_total_difficulty: U256, - parent_scheduled_change: Option, - validators_set_id: u64, - validators_set: ValidatorsSet, - last_signal_block: Option, -} - -impl ImportContext { - /// Returns reference to header submitter (if known). - pub fn submitter(&self) -> Option<&Submitter> { - self.submitter.as_ref() - } - - /// Returns reference to parent header. - pub fn parent_header(&self) -> &AuraHeader { - &self.parent_header - } - - /// Returns total chain difficulty at parent block. - pub fn total_difficulty(&self) -> &U256 { - &self.parent_total_difficulty - } - - /// Returns the validator set change if the parent header has signaled a change. - pub fn parent_scheduled_change(&self) -> Option<&AuraScheduledChange> { - self.parent_scheduled_change.as_ref() - } - - /// Returns id of the set of validators. - pub fn validators_set_id(&self) -> u64 { - self.validators_set_id - } - - /// Returns reference to validators set for the block we're going to import. - pub fn validators_set(&self) -> &ValidatorsSet { - &self.validators_set - } - - /// Returns reference to the latest block which has signaled change of validators set. - /// This may point to parent if parent has signaled change. - pub fn last_signal_block(&self) -> Option { - match self.parent_scheduled_change { - Some(_) => Some(HeaderId { number: self.parent_header.number, hash: self.parent_hash }), - None => self.last_signal_block, - } - } - - /// Converts import context into header we're going to import. - #[allow(clippy::too_many_arguments)] - pub fn into_import_header( - self, - is_best: bool, - id: HeaderId, - header: AuraHeader, - total_difficulty: U256, - enacted_change: Option, - scheduled_change: Option>, - finality_votes: FinalityVotes, - ) -> HeaderToImport { - HeaderToImport { - context: self, - is_best, - id, - header, - total_difficulty, - enacted_change, - scheduled_change, - finality_votes, - } - } -} - -/// The storage that is used by the client. -/// -/// Storage modification must be discarded if block import has failed. -pub trait Storage { - /// Header submitter identifier. - type Submitter: Clone + Ord; - - /// Get best known block and total chain difficulty. - fn best_block(&self) -> (HeaderId, U256); - /// Get last finalized block. - fn finalized_block(&self) -> HeaderId; - /// Get imported header by its hash. - /// - /// Returns header and its submitter (if known). 
- fn header(&self, hash: &H256) -> Option<(AuraHeader, Option)>; - /// Returns latest cached finality votes (if any) for block ancestors, starting - /// from `parent_hash` block and stopping at genesis block, best finalized block - /// or block where `stop_at` returns true. - fn cached_finality_votes( - &self, - parent: &HeaderId, - best_finalized: &HeaderId, - stop_at: impl Fn(&H256) -> bool, - ) -> CachedFinalityVotes; - /// Get header import context by parent header hash. - fn import_context( - &self, - submitter: Option, - parent_hash: &H256, - ) -> Option>; - /// Get new validators that are scheduled by given header and hash of the previous - /// block that has scheduled change. - fn scheduled_change(&self, hash: &H256) -> Option; - /// Insert imported header. - fn insert_header(&mut self, header: HeaderToImport); - /// Finalize given block and schedules pruning of all headers - /// with number < prune_end. - /// - /// The headers in the pruning range could be either finalized, or not. - /// It is the storage duty to ensure that unfinalized headers that have - /// scheduled changes won't be pruned until they or their competitors - /// are finalized. - fn finalize_and_prune_headers(&mut self, finalized: Option, prune_end: u64); -} - -/// Headers pruning strategy. -pub trait PruningStrategy: Default { - /// Return upper bound (exclusive) of headers pruning range. - /// - /// Every value that is returned from this function, must be greater or equal to the - /// previous value. Otherwise it will be ignored (we can't revert pruning). - /// - /// Pallet may prune both finalized and unfinalized blocks. But it can't give any - /// guarantees on when it will happen. Example: if some unfinalized block at height N - /// has scheduled validators set change, then the module won't prune any blocks with - /// number greater than or equal to N even if strategy allows that. - /// - /// If your strategy allows pruning unfinalized blocks, this could lead to switch - /// between finalized forks (only if authorities are misbehaving). But since 50 percent plus one - /// (or 2/3) authorities are able to do whatever they want with the chain, this isn't considered - /// fatal. If your strategy only prunes finalized blocks, we'll never be able to finalize - /// header that isn't descendant of current best finalized block. - fn pruning_upper_bound(&mut self, best_number: u64, best_finalized_number: u64) -> u64; -} - -/// ChainTime represents the runtime on-chain time -pub trait ChainTime: Default { - /// Is a header timestamp ahead of the current on-chain time. - /// - /// Check whether `timestamp` is ahead (i.e greater than) the current on-chain - /// time. If so, return `true`, `false` otherwise. - fn is_timestamp_ahead(&self, timestamp: u64) -> bool; -} - -/// ChainTime implementation for the empty type. -/// -/// This implementation will allow a runtime without the timestamp pallet to use -/// the empty type as its ChainTime associated type. -impl ChainTime for () { - fn is_timestamp_ahead(&self, _: u64) -> bool { - false - } -} - -/// Callbacks for header submission rewards/penalties. -pub trait OnHeadersSubmitted { - /// Called when valid headers have been submitted. - /// - /// The submitter **must not** be rewarded for submitting valid headers, because greedy - /// authority could produce and submit multiple valid headers (without relaying them to other - /// peers) and get rewarded. 
Instead, the provider could track submitters and stop rewarding if - /// too many headers have been submitted without finalization. - fn on_valid_headers_submitted(submitter: AccountId, useful: u64, useless: u64); - /// Called when invalid headers have been submitted. - fn on_invalid_headers_submitted(submitter: AccountId); - /// Called when earlier submitted headers have been finalized. - /// - /// finalized is the number of headers that submitter has submitted and which - /// have been finalized. - fn on_valid_headers_finalized(submitter: AccountId, finalized: u64); -} - -impl OnHeadersSubmitted for () { - fn on_valid_headers_submitted(_submitter: AccountId, _useful: u64, _useless: u64) {} - fn on_invalid_headers_submitted(_submitter: AccountId) {} - fn on_valid_headers_finalized(_submitter: AccountId, _finalized: u64) {} -} - -pub use pallet::*; - -#[frame_support::pallet] -pub mod pallet { - use super::*; - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; - - #[pallet::config] - pub trait Config: frame_system::Config { - /// Aura configuration. - type AuraConfiguration: Get; - /// Validators configuration. - type ValidatorsConfiguration: Get; - - /// Interval (in blocks) for for finality votes caching. - /// If None, cache is disabled. - /// - /// Ideally, this should either be None (when we are sure that there won't - /// be any significant finalization delays), or something that is bit larger - /// than average finalization delay. - type FinalityVotesCachingInterval: Get>; - /// Headers pruning strategy. - type PruningStrategy: PruningStrategy; - /// Header timestamp verification against current on-chain time. - type ChainTime: ChainTime; - - /// Handler for headers submission result. - type OnHeadersSubmitted: OnHeadersSubmitted; - } - - #[pallet::pallet] - #[pallet::generate_store(pub(super) trait Store)] - pub struct Pallet(PhantomData<(T, I)>); - - #[pallet::hooks] - impl, I: 'static> Hooks> for Pallet {} - - #[pallet::call] - impl, I: 'static> Pallet { - /// Import single Aura header. Requires transaction to be **UNSIGNED**. - #[pallet::weight(0)] // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) - pub fn import_unsigned_header( - origin: OriginFor, - header: Box, - receipts: Option>, - ) -> DispatchResult { - frame_system::ensure_none(origin)?; - - import::import_header( - &mut BridgeStorage::::new(), - &mut T::PruningStrategy::default(), - &T::AuraConfiguration::get(), - &T::ValidatorsConfiguration::get(), - None, - *header, - &T::ChainTime::default(), - receipts, - ) - .map_err(|e| e.msg())?; - - Ok(()) - } - - /// Import Aura chain headers in a single **SIGNED** transaction. - /// Ignores non-fatal errors (like when known header is provided), rewards - /// for successful headers import and penalizes for fatal errors. - /// - /// This should be used with caution - passing too many headers could lead to - /// enormous block production/import time. 
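The call below rewards submitters only for headers that eventually finalize and penalizes submitters of invalid batches. That policy, together with the `OnHeadersSubmitted` hooks documented above, can be sketched outside of FRAME roughly as follows; the `SubmissionTracker` type, the `u64` account id and the use of instance methods instead of the pallet's static callbacks are all simplifications for illustration:

    use std::collections::BTreeMap;

    trait OnHeadersSubmitted<AccountId> {
        fn on_valid_headers_submitted(&mut self, submitter: AccountId, useful: u64, useless: u64);
        fn on_invalid_headers_submitted(&mut self, submitter: AccountId);
        fn on_valid_headers_finalized(&mut self, submitter: AccountId, finalized: u64);
    }

    #[derive(Default)]
    struct SubmissionTracker {
        /// Headers submitted but not yet finalized, per submitter.
        pending: BTreeMap<u64, u64>,
        /// Headers that finalized and may now be rewarded, per submitter.
        rewardable: BTreeMap<u64, u64>,
    }

    impl OnHeadersSubmitted<u64> for SubmissionTracker {
        fn on_valid_headers_submitted(&mut self, submitter: u64, useful: u64, _useless: u64) {
            // remember the submission, but do not reward anything yet
            *self.pending.entry(submitter).or_default() += useful;
        }
        fn on_invalid_headers_submitted(&mut self, submitter: u64) {
            // a submitter of invalid headers forfeits its pending credit
            self.pending.remove(&submitter);
        }
        fn on_valid_headers_finalized(&mut self, submitter: u64, finalized: u64) {
            // only finalized headers become rewardable
            let pending = self.pending.entry(submitter).or_default();
            let credited = finalized.min(*pending);
            *pending -= credited;
            *self.rewardable.entry(submitter).or_default() += credited;
        }
    }
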
- #[pallet::weight(0)] // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) - pub fn import_signed_headers( - origin: OriginFor, - headers_with_receipts: Vec<(AuraHeader, Option>)>, - ) -> DispatchResult { - let submitter = frame_system::ensure_signed(origin)?; - let mut finalized_headers = BTreeMap::new(); - let import_result = import::import_headers( - &mut BridgeStorage::::new(), - &mut T::PruningStrategy::default(), - &T::AuraConfiguration::get(), - &T::ValidatorsConfiguration::get(), - Some(submitter.clone()), - headers_with_receipts, - &T::ChainTime::default(), - &mut finalized_headers, - ); - - // if we have finalized some headers, we will reward their submitters even - // if current submitter has provided some invalid headers - for (f_submitter, f_count) in finalized_headers { - T::OnHeadersSubmitted::on_valid_headers_finalized(f_submitter, f_count); - } - - // now track/penalize current submitter for providing new headers - match import_result { - Ok((useful, useless)) => - T::OnHeadersSubmitted::on_valid_headers_submitted(submitter, useful, useless), - Err(error) => { - // even though we may have accept some headers, we do not want to reward someone - // who provides invalid headers - T::OnHeadersSubmitted::on_invalid_headers_submitted(submitter); - return Err(error.msg().into()) - }, - } - - Ok(()) - } - } - - #[pallet::validate_unsigned] - impl, I: 'static> ValidateUnsigned for Pallet { - type Call = Call; - - fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity { - match *call { - Self::Call::import_unsigned_header { ref header, ref receipts } => { - let accept_result = verification::accept_aura_header_into_pool( - &BridgeStorage::::new(), - &T::AuraConfiguration::get(), - &T::ValidatorsConfiguration::get(), - &pool_configuration(), - header, - &T::ChainTime::default(), - receipts.as_ref(), - ); - - match accept_result { - Ok((requires, provides)) => Ok(ValidTransaction { - priority: TransactionPriority::max_value(), - requires, - provides, - longevity: TransactionLongevity::max_value(), - propagate: true, - }), - // UnsignedTooFarInTheFuture is the special error code used to limit - // number of transactions in the pool - we do not want to ban transaction - // in this case (see verification.rs for details) - Err(error::Error::UnsignedTooFarInTheFuture) => UnknownTransaction::Custom( - error::Error::UnsignedTooFarInTheFuture.code(), - ) - .into(), - Err(error) => InvalidTransaction::Custom(error.code()).into(), - } - }, - _ => InvalidTransaction::Call.into(), - } - } - } - - /// Best known block. - #[pallet::storage] - pub(super) type BestBlock, I: 'static = ()> = - StorageValue<_, (HeaderId, U256), ValueQuery>; - - /// Best finalized block. - #[pallet::storage] - pub(super) type FinalizedBlock, I: 'static = ()> = - StorageValue<_, HeaderId, ValueQuery>; - - /// Range of blocks that we want to prune. - #[pallet::storage] - pub(super) type BlocksToPrune, I: 'static = ()> = - StorageValue<_, PruningRange, ValueQuery>; - - /// Map of imported headers by hash. - #[pallet::storage] - pub(super) type Headers, I: 'static = ()> = - StorageMap<_, Identity, H256, StoredHeader>; - - /// Map of imported header hashes by number. - #[pallet::storage] - pub(super) type HeadersByNumber, I: 'static = ()> = - StorageMap<_, Blake2_128Concat, u64, Vec>; - - /// Map of cached finality data by header hash. 
- #[pallet::storage] - pub(super) type FinalityCache, I: 'static = ()> = - StorageMap<_, Identity, H256, FinalityVotes>; - - /// The ID of next validator set. - #[pallet::storage] - pub(super) type NextValidatorsSetId, I: 'static = ()> = - StorageValue<_, u64, ValueQuery>; - - /// Map of validators sets by their id. - #[pallet::storage] - pub(super) type ValidatorsSets, I: 'static = ()> = - StorageMap<_, Twox64Concat, u64, ValidatorsSet>; - - /// Validators sets reference count. Each header that is authored by this set increases - /// the reference count. When we prune this header, we decrease the reference count. - /// When it reaches zero, we are free to prune validator set as well. - #[pallet::storage] - pub(super) type ValidatorsSetsRc, I: 'static = ()> = - StorageMap<_, Twox64Concat, u64, u64>; - - /// Map of validators set changes scheduled by given header. - #[pallet::storage] - pub(super) type ScheduledChanges, I: 'static = ()> = - StorageMap<_, Identity, H256, AuraScheduledChange>; - - #[pallet::genesis_config] - #[cfg_attr(feature = "std", derive(Default))] - pub struct GenesisConfig { - /// PoA header to start with. - pub initial_header: AuraHeader, - /// Initial PoA chain difficulty. - pub initial_difficulty: U256, - /// Initial PoA validators set. - pub initial_validators: Vec
, - } - - #[pallet::genesis_build] - impl, I: 'static> GenesisBuild for GenesisConfig { - fn build(&self) { - // the initial blocks should be selected so that: - // 1) it doesn't signal validators changes; - // 2) there are no scheduled validators changes from previous blocks; - // 3) (implied) all direct children of initial block are authored by the same validators - // set. - - assert!(!self.initial_validators.is_empty(), "Initial validators set can't be empty",); - - initialize_storage::( - &self.initial_header, - self.initial_difficulty, - &self.initial_validators, - ); - } - } -} - -impl, I: 'static> Pallet { - /// Returns number and hash of the best block known to the bridge module. - /// The caller should only submit `import_header` transaction that makes - /// (or leads to making) other header the best one. - pub fn best_block() -> HeaderId { - BridgeStorage::::new().best_block().0 - } - - /// Returns number and hash of the best finalized block known to the bridge module. - pub fn finalized_block() -> HeaderId { - BridgeStorage::::new().finalized_block() - } - - /// Returns true if the import of given block requires transactions receipts. - pub fn is_import_requires_receipts(header: AuraHeader) -> bool { - import::header_import_requires_receipts( - &BridgeStorage::::new(), - &T::ValidatorsConfiguration::get(), - &header, - ) - } - - /// Returns true if header is known to the runtime. - pub fn is_known_block(hash: H256) -> bool { - BridgeStorage::::new().header(&hash).is_some() - } - - /// Verify that transaction is included into given finalized block. - pub fn verify_transaction_finalized( - block: H256, - tx_index: u64, - proof: &[(RawTransaction, RawTransactionReceipt)], - ) -> bool { - crate::verify_transaction_finalized(&BridgeStorage::::new(), block, tx_index, proof) - } -} - -/// Runtime bridge storage. -#[derive(Default)] -pub struct BridgeStorage(sp_std::marker::PhantomData<(T, I)>); - -impl, I: 'static> BridgeStorage { - /// Create new BridgeStorage. - pub fn new() -> Self { - BridgeStorage(sp_std::marker::PhantomData::<(T, I)>::default()) - } - - /// Prune old blocks. 
- fn prune_blocks(&self, mut max_blocks_to_prune: u64, finalized_number: u64, prune_end: u64) { - let pruning_range = BlocksToPrune::::get(); - let mut new_pruning_range = pruning_range.clone(); - - // update oldest block we want to keep - if prune_end > new_pruning_range.oldest_block_to_keep { - new_pruning_range.oldest_block_to_keep = prune_end; - } - - // start pruning blocks - let begin = new_pruning_range.oldest_unpruned_block; - let end = new_pruning_range.oldest_block_to_keep; - log::trace!(target: "runtime", "Pruning blocks in range [{}..{})", begin, end); - for number in begin..end { - // if we can't prune anything => break - if max_blocks_to_prune == 0 { - break - } - - // read hashes of blocks with given number and try to prune these blocks - let blocks_at_number = HeadersByNumber::::take(number); - if let Some(mut blocks_at_number) = blocks_at_number { - self.prune_blocks_by_hashes( - &mut max_blocks_to_prune, - finalized_number, - number, - &mut blocks_at_number, - ); - - // if we haven't pruned all blocks, remember unpruned - if !blocks_at_number.is_empty() { - HeadersByNumber::::insert(number, blocks_at_number); - break - } - } - - // we have pruned all headers at number - new_pruning_range.oldest_unpruned_block = number + 1; - log::trace!( - target: "runtime", - "Oldest unpruned PoA header is now: {}", - new_pruning_range.oldest_unpruned_block, - ); - } - - // update pruning range in storage - if pruning_range != new_pruning_range { - BlocksToPrune::::put(new_pruning_range); - } - } - - /// Prune old blocks with given hashes. - fn prune_blocks_by_hashes( - &self, - max_blocks_to_prune: &mut u64, - finalized_number: u64, - number: u64, - blocks_at_number: &mut Vec, - ) { - // ensure that unfinalized headers we want to prune do not have scheduled changes - if number > finalized_number && - blocks_at_number.iter().any(ScheduledChanges::::contains_key) - { - return - } - - // physically remove headers and (probably) obsolete validators sets - while let Some(hash) = blocks_at_number.pop() { - let header = Headers::::take(&hash); - log::trace!( - target: "runtime", - "Pruning PoA header: ({}, {})", - number, - hash, - ); - - ScheduledChanges::::remove(hash); - FinalityCache::::remove(hash); - if let Some(header) = header { - ValidatorsSetsRc::::mutate(header.next_validators_set_id, |rc| match *rc { - Some(rc) if rc > 1 => Some(rc - 1), - _ => None, - }); - } - - // check if we have already pruned too much headers in this call - *max_blocks_to_prune -= 1; - if *max_blocks_to_prune == 0 { - return - } - } - } -} - -impl, I: 'static> Storage for BridgeStorage { - type Submitter = T::AccountId; - - fn best_block(&self) -> (HeaderId, U256) { - BestBlock::::get() - } - - fn finalized_block(&self) -> HeaderId { - FinalizedBlock::::get() - } - - fn header(&self, hash: &H256) -> Option<(AuraHeader, Option)> { - Headers::::get(hash).map(|header| (header.header, header.submitter)) - } - - fn cached_finality_votes( - &self, - parent: &HeaderId, - best_finalized: &HeaderId, - stop_at: impl Fn(&H256) -> bool, - ) -> CachedFinalityVotes { - let mut votes = CachedFinalityVotes::default(); - let mut current_id = *parent; - loop { - // if we have reached finalized block's sibling => stop with special signal - if current_id.number == best_finalized.number && current_id.hash != best_finalized.hash - { - votes.stopped_at_finalized_sibling = true; - return votes - } - - // if we have reached target header => stop - if stop_at(¤t_id.hash) { - return votes - } - - // if we have found cached votes => 
stop - let cached_votes = FinalityCache::::get(¤t_id.hash); - if let Some(cached_votes) = cached_votes { - votes.votes = Some(cached_votes); - return votes - } - - // read next parent header id - let header = match Headers::::get(¤t_id.hash) { - Some(header) if header.header.number != 0 => header, - _ => return votes, - }; - let parent_id = header.header.parent_id().expect( - "only returns None at genesis header;\ - the header is proved to have number > 0;\ - qed", - ); - - votes - .unaccounted_ancestry - .push_back((current_id, header.submitter, header.header)); - - current_id = parent_id; - } - } - - fn import_context( - &self, - submitter: Option, - parent_hash: &H256, - ) -> Option> { - Headers::::get(parent_hash).map(|parent_header| { - let validators_set = ValidatorsSets::::get(parent_header.next_validators_set_id) - .expect( - "validators set is only pruned when last ref is pruned; there is a ref; qed", - ); - let parent_scheduled_change = ScheduledChanges::::get(parent_hash); - ImportContext { - submitter, - parent_hash: *parent_hash, - parent_header: parent_header.header, - parent_total_difficulty: parent_header.total_difficulty, - parent_scheduled_change, - validators_set_id: parent_header.next_validators_set_id, - validators_set, - last_signal_block: parent_header.last_signal_block, - } - }) - } - - fn scheduled_change(&self, hash: &H256) -> Option { - ScheduledChanges::::get(hash) - } - - fn insert_header(&mut self, header: HeaderToImport) { - if header.is_best { - BestBlock::::put((header.id, header.total_difficulty)); - } - if let Some(scheduled_change) = header.scheduled_change { - ScheduledChanges::::insert( - &header.id.hash, - AuraScheduledChange { - validators: scheduled_change, - prev_signal_block: header.context.last_signal_block, - }, - ); - } - let next_validators_set_id = match header.enacted_change { - Some(enacted_change) => { - let next_validators_set_id = NextValidatorsSetId::::mutate(|set_id| { - let next_set_id = *set_id; - *set_id += 1; - next_set_id - }); - ValidatorsSets::::insert( - next_validators_set_id, - ValidatorsSet { - validators: enacted_change.validators, - enact_block: header.id, - signal_block: enacted_change.signal_block, - }, - ); - ValidatorsSetsRc::::insert(next_validators_set_id, 1); - next_validators_set_id - }, - None => { - ValidatorsSetsRc::::mutate(header.context.validators_set_id, |rc| { - *rc = Some(rc.map(|rc| rc + 1).unwrap_or(1)); - *rc - }); - header.context.validators_set_id - }, - }; - - let finality_votes_caching_interval = T::FinalityVotesCachingInterval::get(); - if let Some(finality_votes_caching_interval) = finality_votes_caching_interval { - let cache_entry_required = - header.id.number != 0 && header.id.number % finality_votes_caching_interval == 0; - if cache_entry_required { - FinalityCache::::insert(header.id.hash, header.finality_votes); - } - } - - log::trace!( - target: "runtime", - "Inserting PoA header: ({}, {})", - header.header.number, - header.id.hash, - ); - - let last_signal_block = header.context.last_signal_block(); - HeadersByNumber::::append(header.id.number, header.id.hash); - Headers::::insert( - &header.id.hash, - StoredHeader { - submitter: header.context.submitter, - header: header.header, - total_difficulty: header.total_difficulty, - next_validators_set_id, - last_signal_block, - }, - ); - } - - fn finalize_and_prune_headers(&mut self, finalized: Option, prune_end: u64) { - // remember just finalized block - let finalized_number = finalized - .as_ref() - .map(|f| f.number) - .unwrap_or_else(|| 
FinalizedBlock::::get().number); - if let Some(finalized) = finalized { - log::trace!( - target: "runtime", - "Finalizing PoA header: ({}, {})", - finalized.number, - finalized.hash, - ); - - FinalizedBlock::::put(finalized); - } - - // and now prune headers if we need to - self.prune_blocks(MAX_BLOCKS_TO_PRUNE_IN_SINGLE_IMPORT, finalized_number, prune_end); - } -} - -/// Initialize storage. -#[cfg(any(feature = "std", feature = "runtime-benchmarks"))] -pub(crate) fn initialize_storage, I: 'static>( - initial_header: &AuraHeader, - initial_difficulty: U256, - initial_validators: &[Address], -) { - let initial_hash = initial_header.compute_hash(); - log::trace!( - target: "runtime", - "Initializing bridge with PoA header: ({}, {})", - initial_header.number, - initial_hash, - ); - - let initial_id = HeaderId { number: initial_header.number, hash: initial_hash }; - BestBlock::::put((initial_id, initial_difficulty)); - FinalizedBlock::::put(initial_id); - BlocksToPrune::::put(PruningRange { - oldest_unpruned_block: initial_header.number, - oldest_block_to_keep: initial_header.number, - }); - HeadersByNumber::::insert(initial_header.number, vec![initial_hash]); - Headers::::insert( - initial_hash, - StoredHeader { - submitter: None, - header: initial_header.clone(), - total_difficulty: initial_difficulty, - next_validators_set_id: 0, - last_signal_block: None, - }, - ); - NextValidatorsSetId::::put(1); - ValidatorsSets::::insert( - 0, - ValidatorsSet { - validators: initial_validators.to_vec(), - signal_block: None, - enact_block: initial_id, - }, - ); - ValidatorsSetsRc::::insert(0, 1); -} - -/// Verify that transaction is included into given finalized block. -pub fn verify_transaction_finalized( - storage: &S, - block: H256, - tx_index: u64, - proof: &[(RawTransaction, RawTransactionReceipt)], -) -> bool { - if tx_index >= proof.len() as _ { - log::trace!( - target: "runtime", - "Tx finality check failed: transaction index ({}) is larger than number of transactions ({})", - tx_index, - proof.len(), - ); - - return false - } - - let header = match storage.header(&block) { - Some((header, _)) => header, - None => { - log::trace!( - target: "runtime", - "Tx finality check failed: can't find header in the storage: {}", - block, - ); - - return false - }, - }; - let finalized = storage.finalized_block(); - - // if header is not yet finalized => return - if header.number > finalized.number { - log::trace!( - target: "runtime", - "Tx finality check failed: header {}/{} is not finalized. Best finalized: {}", - header.number, - block, - finalized.number, - ); - - return false - } - - // check if header is actually finalized - let is_finalized = match header.number < finalized.number { - true => ancestry(storage, finalized.hash) - .skip_while(|(_, ancestor)| ancestor.number > header.number) - .any(|(ancestor_hash, _)| ancestor_hash == block), - false => block == finalized.hash, - }; - if !is_finalized { - log::trace!( - target: "runtime", - "Tx finality check failed: header {} is not finalized: no canonical path to best finalized block {}", - block, - finalized.hash, - ); - - return false - } - - // verify that transaction is included in the block - if let Err(computed_root) = header.check_transactions_root(proof.iter().map(|(tx, _)| tx)) { - log::trace!( - target: "runtime", - "Tx finality check failed: transactions root mismatch. 
Expected: {}, computed: {}", - header.transactions_root, - computed_root, - ); - - return false - } - - // verify that transaction receipt is included in the block - if let Err(computed_root) = header.check_raw_receipts_root(proof.iter().map(|(_, r)| r)) { - log::trace!( - target: "runtime", - "Tx finality check failed: receipts root mismatch. Expected: {}, computed: {}", - header.receipts_root, - computed_root, - ); - - return false - } - - // check that transaction has completed successfully - let is_successful_raw_receipt = Receipt::is_successful_raw_receipt(&proof[tx_index as usize].1); - match is_successful_raw_receipt { - Ok(true) => true, - Ok(false) => { - log::trace!( - target: "runtime", - "Tx finality check failed: receipt shows that transaction has failed", - ); - - false - }, - Err(err) => { - log::trace!( - target: "runtime", - "Tx finality check failed: receipt check has failed: {}", - err, - ); - - false - }, - } -} - -/// Transaction pool configuration. -fn pool_configuration() -> PoolConfiguration { - PoolConfiguration { max_future_number_difference: 10 } -} - -/// Return iterator of given header ancestors. -fn ancestry( - storage: &'_ S, - mut parent_hash: H256, -) -> impl Iterator + '_ { - sp_std::iter::from_fn(move || { - let (header, _) = storage.header(&parent_hash)?; - if header.number == 0 { - return None - } - - let hash = parent_hash; - parent_hash = header.parent_hash; - Some((hash, header)) - }) -} - -#[cfg(test)] -pub(crate) mod tests { - use super::*; - use crate::{ - finality::FinalityAncestor, - mock::{ - genesis, insert_header, run_test, run_test_with_genesis, validators_addresses, - HeaderBuilder, TestRuntime, GAS_LIMIT, - }, - test_utils::validator_utils::*, - }; - use bp_eth_poa::compute_merkle_root; - - const TOTAL_VALIDATORS: usize = 3; - - fn example_tx() -> Vec { - vec![42] - } - - fn example_tx_receipt(success: bool) -> Vec { - Receipt { - // the only thing that we care of: - outcome: bp_eth_poa::TransactionOutcome::StatusCode(if success { 1 } else { 0 }), - gas_used: Default::default(), - log_bloom: Default::default(), - logs: Vec::new(), - } - .rlp() - } - - fn example_header_with_failed_receipt() -> AuraHeader { - HeaderBuilder::with_parent(&example_header()) - .transactions_root(compute_merkle_root(vec![example_tx()].into_iter())) - .receipts_root(compute_merkle_root(vec![example_tx_receipt(false)].into_iter())) - .sign_by(&validator(0)) - } - - fn example_header() -> AuraHeader { - HeaderBuilder::with_parent(&example_header_parent()) - .transactions_root(compute_merkle_root(vec![example_tx()].into_iter())) - .receipts_root(compute_merkle_root(vec![example_tx_receipt(true)].into_iter())) - .sign_by(&validator(0)) - } - - fn example_header_parent() -> AuraHeader { - HeaderBuilder::with_parent(&genesis()) - .transactions_root(compute_merkle_root(vec![example_tx()].into_iter())) - .receipts_root(compute_merkle_root(vec![example_tx_receipt(true)].into_iter())) - .sign_by(&validator(0)) - } - - fn with_headers_to_prune(f: impl Fn(BridgeStorage) -> T) -> T { - run_test(TOTAL_VALIDATORS, |ctx| { - for i in 1..10 { - let mut headers_by_number = Vec::with_capacity(5); - for j in 0..5 { - let header = HeaderBuilder::with_parent_number(i - 1) - .gas_limit((GAS_LIMIT + j).into()) - .sign_by_set(&ctx.validators); - let hash = header.compute_hash(); - headers_by_number.push(hash); - Headers::::insert( - hash, - StoredHeader { - submitter: None, - header, - total_difficulty: 0.into(), - next_validators_set_id: 0, - last_signal_block: None, - }, - ); - - if i 
== 7 && j == 1 { - ScheduledChanges::::insert( - hash, - AuraScheduledChange { - validators: validators_addresses(5), - prev_signal_block: None, - }, - ); - } - } - HeadersByNumber::::insert(i, headers_by_number); - } - - f(BridgeStorage::new()) - }) - } - - #[test] - fn blocks_are_not_pruned_if_range_is_empty() { - with_headers_to_prune(|storage| { - BlocksToPrune::::put(PruningRange { - oldest_unpruned_block: 5, - oldest_block_to_keep: 5, - }); - - // try to prune blocks [5; 10) - storage.prune_blocks(0xFFFF, 10, 5); - assert_eq!(HeadersByNumber::::get(&5).unwrap().len(), 5); - assert_eq!( - BlocksToPrune::::get(), - PruningRange { oldest_unpruned_block: 5, oldest_block_to_keep: 5 }, - ); - }); - } - - #[test] - fn blocks_to_prune_never_shrinks_from_the_end() { - with_headers_to_prune(|storage| { - BlocksToPrune::::put(PruningRange { - oldest_unpruned_block: 0, - oldest_block_to_keep: 5, - }); - - // try to prune blocks [5; 10) - storage.prune_blocks(0xFFFF, 10, 3); - assert_eq!( - BlocksToPrune::::get(), - PruningRange { oldest_unpruned_block: 5, oldest_block_to_keep: 5 }, - ); - }); - } - - #[test] - fn blocks_are_not_pruned_if_limit_is_zero() { - with_headers_to_prune(|storage| { - // try to prune blocks [0; 10) - storage.prune_blocks(0, 10, 10); - assert!(HeadersByNumber::::get(&0).is_some()); - assert!(HeadersByNumber::::get(&1).is_some()); - assert!(HeadersByNumber::::get(&2).is_some()); - assert!(HeadersByNumber::::get(&3).is_some()); - assert_eq!( - BlocksToPrune::::get(), - PruningRange { oldest_unpruned_block: 0, oldest_block_to_keep: 10 }, - ); - }); - } - - #[test] - fn blocks_are_pruned_if_limit_is_non_zero() { - with_headers_to_prune(|storage| { - // try to prune blocks [0; 10) - storage.prune_blocks(7, 10, 10); - // 1 headers with number = 0 is pruned (1 total) - assert!(HeadersByNumber::::get(&0).is_none()); - // 5 headers with number = 1 are pruned (6 total) - assert!(HeadersByNumber::::get(&1).is_none()); - // 1 header with number = 2 are pruned (7 total) - assert_eq!(HeadersByNumber::::get(&2).unwrap().len(), 4); - assert_eq!( - BlocksToPrune::::get(), - PruningRange { oldest_unpruned_block: 2, oldest_block_to_keep: 10 }, - ); - - // try to prune blocks [2; 10) - storage.prune_blocks(11, 10, 10); - // 4 headers with number = 2 are pruned (4 total) - assert!(HeadersByNumber::::get(&2).is_none()); - // 5 headers with number = 3 are pruned (9 total) - assert!(HeadersByNumber::::get(&3).is_none()); - // 2 headers with number = 4 are pruned (11 total) - assert_eq!(HeadersByNumber::::get(&4).unwrap().len(), 3); - assert_eq!( - BlocksToPrune::::get(), - PruningRange { oldest_unpruned_block: 4, oldest_block_to_keep: 10 }, - ); - }); - } - - #[test] - fn pruning_stops_on_unfainalized_block_with_scheduled_change() { - with_headers_to_prune(|storage| { - // try to prune blocks [0; 10) - // last finalized block is 5 - // and one of blocks#7 has scheduled change - // => we won't prune any block#7 at all - storage.prune_blocks(0xFFFF, 5, 10); - assert!(HeadersByNumber::::get(&0).is_none()); - assert!(HeadersByNumber::::get(&1).is_none()); - assert!(HeadersByNumber::::get(&2).is_none()); - assert!(HeadersByNumber::::get(&3).is_none()); - assert!(HeadersByNumber::::get(&4).is_none()); - assert!(HeadersByNumber::::get(&5).is_none()); - assert!(HeadersByNumber::::get(&6).is_none()); - assert_eq!(HeadersByNumber::::get(&7).unwrap().len(), 5); - assert_eq!( - BlocksToPrune::::get(), - PruningRange { oldest_unpruned_block: 7, oldest_block_to_keep: 10 }, - ); - }); - } - - #[test] - fn 
finality_votes_are_cached() { - run_test(TOTAL_VALIDATORS, |ctx| { - let mut storage = BridgeStorage::::new(); - let interval = ::FinalityVotesCachingInterval::get().unwrap(); - - // for all headers with number < interval, cache entry is not created - for i in 1..interval { - let header = HeaderBuilder::with_parent_number(i - 1).sign_by_set(&ctx.validators); - let id = header.compute_id(); - insert_header(&mut storage, header); - assert_eq!(FinalityCache::::get(&id.hash), None); - } - - // for header with number = interval, cache entry is created - let header_with_entry = - HeaderBuilder::with_parent_number(interval - 1).sign_by_set(&ctx.validators); - let header_with_entry_hash = header_with_entry.compute_hash(); - insert_header(&mut storage, header_with_entry); - assert!(FinalityCache::::get(&header_with_entry_hash).is_some()); - - // when we later prune this header, cache entry is removed - BlocksToPrune::::put(PruningRange { - oldest_unpruned_block: interval - 1, - oldest_block_to_keep: interval - 1, - }); - storage.finalize_and_prune_headers(None, interval + 1); - assert_eq!(FinalityCache::::get(&header_with_entry_hash), None); - }); - } - - #[test] - fn cached_finality_votes_finds_entry() { - run_test(TOTAL_VALIDATORS, |ctx| { - // insert 5 headers - let mut storage = BridgeStorage::::new(); - let mut headers = Vec::new(); - for i in 1..5 { - let header = HeaderBuilder::with_parent_number(i - 1).sign_by_set(&ctx.validators); - headers.push(header.clone()); - insert_header(&mut storage, header); - } - - // when inserting header#6, entry isn't found - let id5 = headers.last().unwrap().compute_id(); - assert_eq!( - storage.cached_finality_votes(&id5, &genesis().compute_id(), |_| false), - CachedFinalityVotes { - stopped_at_finalized_sibling: false, - unaccounted_ancestry: headers - .iter() - .map(|header| (header.compute_id(), None, header.clone(),)) - .rev() - .collect(), - votes: None, - }, - ); - - // let's now create entry at #3 - let hash3 = headers[2].compute_hash(); - let votes_at_3 = FinalityVotes { - votes: vec![([42; 20].into(), 21)].into_iter().collect(), - ancestry: vec![FinalityAncestor { - id: HeaderId { number: 100, hash: Default::default() }, - ..Default::default() - }] - .into_iter() - .collect(), - }; - FinalityCache::::insert(hash3, votes_at_3.clone()); - - // searching at #6 again => entry is found - assert_eq!( - storage.cached_finality_votes(&id5, &genesis().compute_id(), |_| false), - CachedFinalityVotes { - stopped_at_finalized_sibling: false, - unaccounted_ancestry: headers - .iter() - .skip(3) - .map(|header| (header.compute_id(), None, header.clone(),)) - .rev() - .collect(), - votes: Some(votes_at_3), - }, - ); - }); - } - - #[test] - fn cached_finality_votes_stops_at_finalized_sibling() { - run_test(TOTAL_VALIDATORS, |ctx| { - let mut storage = BridgeStorage::::new(); - - // insert header1 - let header1 = HeaderBuilder::with_parent_number(0).sign_by_set(&ctx.validators); - let header1_id = header1.compute_id(); - insert_header(&mut storage, header1); - - // insert header1' - sibling of header1 - let header1s = HeaderBuilder::with_parent_number(0) - .gas_limit((GAS_LIMIT + 1).into()) - .sign_by_set(&ctx.validators); - let header1s_id = header1s.compute_id(); - insert_header(&mut storage, header1s); - - // header1 is finalized - FinalizedBlock::::put(header1_id); - - // trying to get finality votes when importing header2 -> header1 succeeds - assert!( - !storage - .cached_finality_votes(&header1_id, &genesis().compute_id(), |_| false) - 
.stopped_at_finalized_sibling - ); - - // trying to get finality votes when importing header2s -> header1s fails - assert!( - storage - .cached_finality_votes(&header1s_id, &header1_id, |_| false) - .stopped_at_finalized_sibling - ); - }); - } - - #[test] - fn verify_transaction_finalized_works_for_best_finalized_header() { - run_test_with_genesis(example_header(), TOTAL_VALIDATORS, |_| { - let storage = BridgeStorage::::new(); - assert!(verify_transaction_finalized( - &storage, - example_header().compute_hash(), - 0, - &[(example_tx(), example_tx_receipt(true))], - )); - }); - } - - #[test] - fn verify_transaction_finalized_works_for_best_finalized_header_ancestor() { - run_test(TOTAL_VALIDATORS, |_| { - let mut storage = BridgeStorage::::new(); - insert_header(&mut storage, example_header_parent()); - insert_header(&mut storage, example_header()); - storage.finalize_and_prune_headers(Some(example_header().compute_id()), 0); - assert!(verify_transaction_finalized( - &storage, - example_header_parent().compute_hash(), - 0, - &[(example_tx(), example_tx_receipt(true))], - )); - }); - } - - #[test] - fn verify_transaction_finalized_rejects_proof_with_missing_tx() { - run_test_with_genesis(example_header(), TOTAL_VALIDATORS, |_| { - let storage = BridgeStorage::::new(); - assert!(!verify_transaction_finalized( - &storage, - example_header().compute_hash(), - 1, - &[], - ),); - }); - } - - #[test] - fn verify_transaction_finalized_rejects_unknown_header() { - run_test(TOTAL_VALIDATORS, |_| { - let storage = BridgeStorage::::new(); - assert!(!verify_transaction_finalized( - &storage, - example_header().compute_hash(), - 1, - &[], - )); - }); - } - - #[test] - fn verify_transaction_finalized_rejects_unfinalized_header() { - run_test(TOTAL_VALIDATORS, |_| { - let mut storage = BridgeStorage::::new(); - insert_header(&mut storage, example_header_parent()); - insert_header(&mut storage, example_header()); - assert!(!verify_transaction_finalized( - &storage, - example_header().compute_hash(), - 0, - &[(example_tx(), example_tx_receipt(true))], - )); - }); - } - - #[test] - fn verify_transaction_finalized_rejects_finalized_header_sibling() { - run_test(TOTAL_VALIDATORS, |_| { - let mut finalized_header_sibling = example_header(); - finalized_header_sibling.timestamp = 1; - let finalized_header_sibling_hash = finalized_header_sibling.compute_hash(); - - let mut storage = BridgeStorage::::new(); - insert_header(&mut storage, example_header_parent()); - insert_header(&mut storage, example_header()); - insert_header(&mut storage, finalized_header_sibling); - storage.finalize_and_prune_headers(Some(example_header().compute_id()), 0); - assert!(!verify_transaction_finalized( - &storage, - finalized_header_sibling_hash, - 0, - &[(example_tx(), example_tx_receipt(true))], - )); - }); - } - - #[test] - fn verify_transaction_finalized_rejects_finalized_header_uncle() { - run_test(TOTAL_VALIDATORS, |_| { - let mut finalized_header_uncle = example_header_parent(); - finalized_header_uncle.timestamp = 1; - let finalized_header_uncle_hash = finalized_header_uncle.compute_hash(); - - let mut storage = BridgeStorage::::new(); - insert_header(&mut storage, example_header_parent()); - insert_header(&mut storage, finalized_header_uncle); - insert_header(&mut storage, example_header()); - storage.finalize_and_prune_headers(Some(example_header().compute_id()), 0); - assert!(!verify_transaction_finalized( - &storage, - finalized_header_uncle_hash, - 0, - &[(example_tx(), example_tx_receipt(true))], - )); - }); - } - - 
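For illustration, here is a minimal, self-contained sketch of the proof flow that the `verify_transaction_finalized` tests above and below exercise. It is not the pallet's code: `Header`, `fingerprint` and `verify_proof` are hypothetical stand-ins, a `DefaultHasher` fingerprint replaces the Ethereum trie roots and a single status byte replaces the RLP-decoded receipt outcome.

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

/// Stand-in for the two header fields the check relies on.
struct Header {
    transactions_root: u64,
    receipts_root: u64,
}

/// Stand-in commitment; the real pallet computes an Ethereum trie root here.
fn fingerprint<'a, I: Iterator<Item = &'a Vec<u8>>>(items: I) -> u64 {
    let mut hasher = DefaultHasher::new();
    for item in items {
        item.hash(&mut hasher);
    }
    hasher.finish()
}

/// Simplified shape of a `verify_transaction_finalized`-style check: the proof
/// is the list of (raw transaction, raw receipt) pairs of the whole block.
fn verify_proof(header: &Header, tx_index: usize, proof: &[(Vec<u8>, Vec<u8>)]) -> bool {
    // the proof must actually contain the transaction it claims to prove
    if tx_index >= proof.len() {
        return false;
    }
    // both committed roots must be reproducible from the proof
    if fingerprint(proof.iter().map(|(tx, _)| tx)) != header.transactions_root {
        return false;
    }
    if fingerprint(proof.iter().map(|(_, receipt)| receipt)) != header.receipts_root {
        return false;
    }
    // the receipt must report success (one status byte stands in for the
    // RLP-encoded status code the real pallet decodes)
    proof[tx_index].1.first() == Some(&1)
}

fn main() {
    let proof = vec![(vec![42u8], vec![1u8])];
    let header = Header {
        transactions_root: fingerprint(proof.iter().map(|(tx, _)| tx)),
        receipts_root: fingerprint(proof.iter().map(|(_, r)| r)),
    };
    assert!(verify_proof(&header, 0, &proof));
    // a missing transaction index or extra proof entries are rejected
    assert!(!verify_proof(&header, 1, &proof));
    assert!(!verify_proof(&header, 0, &[(vec![42u8], vec![1u8]), (vec![43u8], vec![1u8])]));
    // a proof whose receipt reports failure is rejected even when the roots match
    let failed = vec![(vec![42u8], vec![0u8])];
    let failed_header = Header {
        transactions_root: fingerprint(failed.iter().map(|(tx, _)| tx)),
        receipts_root: fingerprint(failed.iter().map(|(_, r)| r)),
    };
    assert!(!verify_proof(&failed_header, 0, &failed));
}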
#[test] - fn verify_transaction_finalized_rejects_invalid_transactions_in_proof() { - run_test_with_genesis(example_header(), TOTAL_VALIDATORS, |_| { - let storage = BridgeStorage::::new(); - assert!(!verify_transaction_finalized( - &storage, - example_header().compute_hash(), - 0, - &[ - (example_tx(), example_tx_receipt(true)), - (example_tx(), example_tx_receipt(true)) - ], - )); - }); - } - - #[test] - fn verify_transaction_finalized_rejects_invalid_receipts_in_proof() { - run_test_with_genesis(example_header(), TOTAL_VALIDATORS, |_| { - let storage = BridgeStorage::::new(); - assert!(!verify_transaction_finalized( - &storage, - example_header().compute_hash(), - 0, - &[(example_tx(), vec![42])], - )); - }); - } - - #[test] - fn verify_transaction_finalized_rejects_failed_transaction() { - run_test_with_genesis(example_header_with_failed_receipt(), TOTAL_VALIDATORS, |_| { - let storage = BridgeStorage::::new(); - assert!(!verify_transaction_finalized( - &storage, - example_header_with_failed_receipt().compute_hash(), - 0, - &[(example_tx(), example_tx_receipt(false))], - )); - }); - } -} diff --git a/bridges/modules/ethereum/src/mock.rs b/bridges/modules/ethereum/src/mock.rs deleted file mode 100644 index 877f7a9dc11f..000000000000 --- a/bridges/modules/ethereum/src/mock.rs +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -// From construct_runtime macro -#![allow(clippy::from_over_into)] - -pub use crate::test_utils::{ - insert_header, validator_utils::*, validators_change_receipt, HeaderBuilder, GAS_LIMIT, -}; -pub use bp_eth_poa::signatures::secret_to_address; - -use crate::{ - validators::{ValidatorsConfiguration, ValidatorsSource}, - AuraConfiguration, ChainTime, Config, GenesisConfig as CrateGenesisConfig, PruningStrategy, -}; -use bp_eth_poa::{Address, AuraHeader, H256, U256}; -use frame_support::{parameter_types, traits::GenesisBuild, weights::Weight}; -use libsecp256k1::SecretKey; -use sp_runtime::{ - testing::Header as SubstrateHeader, - traits::{BlakeTwo256, IdentityLookup}, - Perbill, -}; - -pub type AccountId = u64; - -type Block = frame_system::mocking::MockBlock; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; - -use crate as pallet_ethereum; - -frame_support::construct_runtime! { - pub enum TestRuntime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Ethereum: pallet_ethereum::{Pallet, Call}, - } -} - -parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); -} - -impl frame_system::Config for TestRuntime { - type Origin = Origin; - type Index = u64; - type Call = Call; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type Header = SubstrateHeader; - type Event = Event; - type BlockHashCount = BlockHashCount; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type SS58Prefix = (); - type OnSetCode = (); -} - -parameter_types! { - pub const TestFinalityVotesCachingInterval: Option = Some(16); - pub TestAuraConfiguration: AuraConfiguration = test_aura_config(); - pub TestValidatorsConfiguration: ValidatorsConfiguration = test_validators_config(); -} - -impl Config for TestRuntime { - type AuraConfiguration = TestAuraConfiguration; - type ValidatorsConfiguration = TestValidatorsConfiguration; - type FinalityVotesCachingInterval = TestFinalityVotesCachingInterval; - type PruningStrategy = KeepSomeHeadersBehindBest; - type ChainTime = ConstChainTime; - type OnHeadersSubmitted = (); -} - -/// Test context. -pub struct TestContext { - /// Initial (genesis) header. - pub genesis: AuraHeader, - /// Number of initial validators. - pub total_validators: usize, - /// Secret keys of validators, ordered by validator index. - pub validators: Vec, - /// Addresses of validators, ordered by validator index. - pub addresses: Vec
, -} - -/// Aura configuration that is used in tests by default. -pub fn test_aura_config() -> AuraConfiguration { - AuraConfiguration { - empty_steps_transition: u64::max_value(), - strict_empty_steps_transition: 0, - validate_step_transition: 0x16e360, - validate_score_transition: 0x41a3c4, - two_thirds_majority_transition: u64::max_value(), - min_gas_limit: 0x1388.into(), - max_gas_limit: U256::max_value(), - maximum_extra_data_size: 0x20, - } -} - -/// Validators configuration that is used in tests by default. -pub fn test_validators_config() -> ValidatorsConfiguration { - ValidatorsConfiguration::Single(ValidatorsSource::List(validators_addresses(3))) -} - -/// Genesis header that is used in tests by default. -pub fn genesis() -> AuraHeader { - HeaderBuilder::genesis().sign_by(&validator(0)) -} - -/// Run test with default genesis header. -pub fn run_test(total_validators: usize, test: impl FnOnce(TestContext) -> T) -> T { - run_test_with_genesis(genesis(), total_validators, test) -} - -/// Run test with default genesis header. -pub fn run_test_with_genesis( - genesis: AuraHeader, - total_validators: usize, - test: impl FnOnce(TestContext) -> T, -) -> T { - let validators = validators(total_validators); - let addresses = validators_addresses(total_validators); - sp_io::TestExternalities::from( - GenesisBuild::::build_storage(&CrateGenesisConfig { - initial_header: genesis.clone(), - initial_difficulty: 0.into(), - initial_validators: addresses.clone(), - }) - .unwrap(), - ) - .execute_with(|| test(TestContext { genesis, total_validators, validators, addresses })) -} - -/// Pruning strategy that keeps 10 headers behind best block. -pub struct KeepSomeHeadersBehindBest(pub u64); - -impl Default for KeepSomeHeadersBehindBest { - fn default() -> KeepSomeHeadersBehindBest { - KeepSomeHeadersBehindBest(10) - } -} - -impl PruningStrategy for KeepSomeHeadersBehindBest { - fn pruning_upper_bound(&mut self, best_number: u64, _: u64) -> u64 { - best_number.saturating_sub(self.0) - } -} - -/// Constant chain time -#[derive(Default)] -pub struct ConstChainTime; - -impl ChainTime for ConstChainTime { - fn is_timestamp_ahead(&self, timestamp: u64) -> bool { - let now = i32::max_value() as u64 / 2; - timestamp > now - } -} diff --git a/bridges/modules/ethereum/src/test_utils.rs b/bridges/modules/ethereum/src/test_utils.rs deleted file mode 100644 index 414445f3aacc..000000000000 --- a/bridges/modules/ethereum/src/test_utils.rs +++ /dev/null @@ -1,322 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Utilities for testing and benchmarking the Ethereum Bridge Pallet. -//! -//! Although the name implies that it is used by tests, it shouldn't be be used _directly_ by tests. -//! Instead these utilities should be used by the Mock runtime, which in turn is used by tests. -//! -//! 
On the other hand, they may be used directly by the benchmark module. - -// Since this is test code it's fine that not everything is used -#![allow(dead_code)] - -use crate::{ - finality::FinalityVotes, validators::CHANGE_EVENT_HASH, verification::calculate_score, Config, - HeaderToImport, Storage, -}; - -use bp_eth_poa::{ - rlp_encode, - signatures::{secret_to_address, sign, SignHeader}, - Address, AuraHeader, Bloom, Receipt, SealedEmptyStep, H256, U256, -}; -use libsecp256k1::SecretKey; -use sp_std::prelude::*; - -/// Gas limit valid in test environment. -pub const GAS_LIMIT: u64 = 0x2000; - -/// Test header builder. -pub struct HeaderBuilder { - header: AuraHeader, - parent_header: AuraHeader, -} - -impl HeaderBuilder { - /// Creates default genesis header. - pub fn genesis() -> Self { - let current_step = 0u64; - Self { - header: AuraHeader { - gas_limit: GAS_LIMIT.into(), - seal: vec![bp_eth_poa::rlp_encode(¤t_step).to_vec(), vec![]], - ..Default::default() - }, - parent_header: Default::default(), - } - } - - /// Creates default header on top of test parent with given hash. - #[cfg(test)] - pub fn with_parent_hash(parent_hash: H256) -> Self { - Self::with_parent_hash_on_runtime::(parent_hash) - } - - /// Creates default header on top of test parent with given number. First parent is selected. - #[cfg(test)] - pub fn with_parent_number(parent_number: u64) -> Self { - Self::with_parent_number_on_runtime::(parent_number) - } - - /// Creates default header on top of parent with given hash. - pub fn with_parent_hash_on_runtime, I: 'static>(parent_hash: H256) -> Self { - use crate::Headers; - - let parent_header = Headers::::get(&parent_hash).unwrap().header; - Self::with_parent(&parent_header) - } - - /// Creates default header on top of parent with given number. First parent is selected. - pub fn with_parent_number_on_runtime, I: 'static>(parent_number: u64) -> Self { - use crate::HeadersByNumber; - - let parent_hash = HeadersByNumber::::get(parent_number).unwrap()[0]; - Self::with_parent_hash_on_runtime::(parent_hash) - } - - /// Creates default header on top of non-existent parent. - #[cfg(test)] - pub fn with_number(number: u64) -> Self { - Self::with_parent(&AuraHeader { - number: number - 1, - seal: vec![bp_eth_poa::rlp_encode(&(number - 1)).to_vec(), vec![]], - ..Default::default() - }) - } - - /// Creates default header on top of given parent. - pub fn with_parent(parent_header: &AuraHeader) -> Self { - let parent_step = parent_header.step().unwrap(); - let current_step = parent_step + 1; - Self { - header: AuraHeader { - parent_hash: parent_header.compute_hash(), - number: parent_header.number + 1, - gas_limit: GAS_LIMIT.into(), - seal: vec![bp_eth_poa::rlp_encode(¤t_step).to_vec(), vec![]], - difficulty: calculate_score(parent_step, current_step, 0), - ..Default::default() - }, - parent_header: parent_header.clone(), - } - } - - /// Update step of this header. - pub fn step(mut self, step: u64) -> Self { - let parent_step = self.parent_header.step(); - self.header.seal[0] = rlp_encode(&step).to_vec(); - self.header.difficulty = parent_step - .map(|parent_step| calculate_score(parent_step, step, 0)) - .unwrap_or_default(); - self - } - - /// Adds empty steps to this header. 
- pub fn empty_steps(mut self, empty_steps: &[(&SecretKey, u64)]) -> Self { - let sealed_empty_steps = empty_steps - .iter() - .map(|(author, step)| { - let mut empty_step = SealedEmptyStep { step: *step, signature: Default::default() }; - let message = empty_step.message(&self.header.parent_hash); - let signature: [u8; 65] = sign(author, message).into(); - empty_step.signature = signature.into(); - empty_step - }) - .collect::>(); - - // by default in test configuration headers are generated without empty steps seal - if self.header.seal.len() < 3 { - self.header.seal.push(Vec::new()); - } - - self.header.seal[2] = SealedEmptyStep::rlp_of(&sealed_empty_steps); - self - } - - /// Update difficulty field of this header. - pub fn difficulty(mut self, difficulty: U256) -> Self { - self.header.difficulty = difficulty; - self - } - - /// Update extra data field of this header. - pub fn extra_data(mut self, extra_data: Vec) -> Self { - self.header.extra_data = extra_data; - self - } - - /// Update gas limit field of this header. - pub fn gas_limit(mut self, gas_limit: U256) -> Self { - self.header.gas_limit = gas_limit; - self - } - - /// Update gas used field of this header. - pub fn gas_used(mut self, gas_used: U256) -> Self { - self.header.gas_used = gas_used; - self - } - - /// Update log bloom field of this header. - pub fn log_bloom(mut self, log_bloom: Bloom) -> Self { - self.header.log_bloom = log_bloom; - self - } - - /// Update receipts root field of this header. - pub fn receipts_root(mut self, receipts_root: H256) -> Self { - self.header.receipts_root = receipts_root; - self - } - - /// Update timestamp field of this header. - pub fn timestamp(mut self, timestamp: u64) -> Self { - self.header.timestamp = timestamp; - self - } - - /// Update transactions root field of this header. - pub fn transactions_root(mut self, transactions_root: H256) -> Self { - self.header.transactions_root = transactions_root; - self - } - - /// Signs header by given author. - pub fn sign_by(self, author: &SecretKey) -> AuraHeader { - self.header.sign_by(author) - } - - /// Signs header by given authors set. - pub fn sign_by_set(self, authors: &[SecretKey]) -> AuraHeader { - self.header.sign_by_set(authors) - } -} - -/// Helper function for getting a genesis header which has been signed by an authority. -pub fn build_genesis_header(author: &SecretKey) -> AuraHeader { - let genesis = HeaderBuilder::genesis(); - genesis.header.sign_by(author) -} - -/// Helper function for building a custom child header which has been signed by an authority. -pub fn build_custom_header( - author: &SecretKey, - previous: &AuraHeader, - customize_header: F, -) -> AuraHeader -where - F: FnOnce(AuraHeader) -> AuraHeader, -{ - let new_header = HeaderBuilder::with_parent(previous); - let custom_header = customize_header(new_header.header); - custom_header.sign_by(author) -} - -/// Insert unverified header into storage. -/// -/// This function assumes that the header is signed by validator from the current set. 
-pub fn insert_header(storage: &mut S, header: AuraHeader) { - let id = header.compute_id(); - let best_finalized = storage.finalized_block(); - let import_context = storage.import_context(None, &header.parent_hash).unwrap(); - let parent_finality_votes = - storage.cached_finality_votes(&header.parent_id().unwrap(), &best_finalized, |_| false); - let finality_votes = crate::finality::prepare_votes( - parent_finality_votes, - best_finalized, - &import_context.validators_set().validators.iter().collect(), - id, - &header, - None, - ) - .unwrap(); - - storage.insert_header(HeaderToImport { - context: storage.import_context(None, &header.parent_hash).unwrap(), - is_best: true, - id, - header, - total_difficulty: 0.into(), - enacted_change: None, - scheduled_change: None, - finality_votes, - }); -} - -/// Insert unverified header into storage. -/// -/// No assumptions about header author are made. The cost is that finality votes cache -/// is filled incorrectly, so this function shall not be used if you're going to insert -/// (or import) header descendants. -pub fn insert_dummy_header(storage: &mut S, header: AuraHeader) { - storage.insert_header(HeaderToImport { - context: storage.import_context(None, &header.parent_hash).unwrap(), - is_best: true, - id: header.compute_id(), - header, - total_difficulty: 0.into(), - enacted_change: None, - scheduled_change: None, - finality_votes: FinalityVotes::default(), - }); -} - -pub fn validators_change_receipt(parent_hash: H256) -> Receipt { - use bp_eth_poa::{LogEntry, TransactionOutcome}; - - Receipt { - gas_used: 0.into(), - log_bloom: (&[0xff; 256]).into(), - outcome: TransactionOutcome::Unknown, - logs: vec![LogEntry { - address: [3; 20].into(), - topics: vec![CHANGE_EVENT_HASH.into(), parent_hash], - data: vec![ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - ], - }], - } -} - -pub mod validator_utils { - use super::*; - - /// Return key pair of given test validator. - pub fn validator(index: usize) -> SecretKey { - let mut raw_secret = [0u8; 32]; - raw_secret[..8].copy_from_slice(&(index + 1).to_le_bytes()); - SecretKey::parse(&raw_secret).unwrap() - } - - /// Return key pairs of all test validators. - pub fn validators(count: usize) -> Vec { - (0..count).map(validator).collect() - } - - /// Return address of test validator. - pub fn validator_address(index: usize) -> Address { - secret_to_address(&validator(index)) - } - - /// Return addresses of all test validators. - pub fn validators_addresses(count: usize) -> Vec
{ - (0..count).map(validator_address).collect() - } -} diff --git a/bridges/modules/ethereum/src/validators.rs b/bridges/modules/ethereum/src/validators.rs deleted file mode 100644 index fd010d52c39d..000000000000 --- a/bridges/modules/ethereum/src/validators.rs +++ /dev/null @@ -1,458 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::{error::Error, ChangeToEnact, Storage}; -use bp_eth_poa::{Address, AuraHeader, HeaderId, LogEntry, Receipt, U256}; -use sp_std::prelude::*; - -/// The hash of InitiateChange event of the validators set contract. -pub(crate) const CHANGE_EVENT_HASH: &[u8; 32] = &[ - 0x55, 0x25, 0x2f, 0xa6, 0xee, 0xe4, 0x74, 0x1b, 0x4e, 0x24, 0xa7, 0x4a, 0x70, 0xe9, 0xc1, 0x1f, - 0xd2, 0xc2, 0x28, 0x1d, 0xf8, 0xd6, 0xea, 0x13, 0x12, 0x6f, 0xf8, 0x45, 0xf7, 0x82, 0x5c, 0x89, -]; - -/// Where source of validators addresses come from. This covers the chain lifetime. -pub enum ValidatorsConfiguration { - /// There's a single source for the whole chain lifetime. - Single(ValidatorsSource), - /// Validators source changes at given blocks. The blocks are ordered - /// by the block number. - Multi(Vec<(u64, ValidatorsSource)>), -} - -/// Where validators addresses come from. -/// -/// This source is valid within some blocks range. The blocks range could -/// cover multiple epochs - i.e. the validators that are authoring blocks -/// within this range could change, but the source itself can not. -#[cfg_attr(any(test, feature = "runtime-benchmarks"), derive(Debug, PartialEq))] -pub enum ValidatorsSource { - /// The validators addresses are hardcoded and never change. - List(Vec
), - /// The validators addresses are determined by the validators set contract - /// deployed at given address. The contract must implement the `ValidatorSet` - /// interface. Additionally, the initial validators set must be provided. - Contract(Address, Vec
), -} - -/// A short hand for optional validators change. -pub type ValidatorsChange = Option>; - -/// Validators manager. -pub struct Validators<'a> { - config: &'a ValidatorsConfiguration, -} - -impl<'a> Validators<'a> { - /// Creates new validators manager using given configuration. - pub fn new(config: &'a ValidatorsConfiguration) -> Self { - Self { config } - } - - /// Returns true if header (probabilistically) signals validators change and - /// the caller needs to provide transactions receipts to import the header. - pub fn maybe_signals_validators_change(&self, header: &AuraHeader) -> bool { - let (_, _, source) = self.source_at(header.number); - - // if we are taking validators set from the fixed list, there's always - // single epoch - // => we never require transactions receipts - let contract_address = match source { - ValidatorsSource::List(_) => return false, - ValidatorsSource::Contract(contract_address, _) => contract_address, - }; - - // else we need to check logs bloom and if it has required bits set, it means - // that the contract has (probably) emitted epoch change event - let expected_bloom = LogEntry { - address: *contract_address, - topics: vec![CHANGE_EVENT_HASH.into(), header.parent_hash], - data: Vec::new(), // irrelevant for bloom. - } - .bloom(); - - header.log_bloom.contains(&expected_bloom) - } - - /// Extracts validators change signal from the header. - /// - /// Returns tuple where first element is the change scheduled by this header - /// (i.e. this change is only applied starting from the block that has finalized - /// current block). The second element is the immediately applied change. - pub fn extract_validators_change( - &self, - header: &AuraHeader, - receipts: Option>, - ) -> Result<(ValidatorsChange, ValidatorsChange), Error> { - // let's first check if new source is starting from this header - let (source_index, _, source) = self.source_at(header.number); - let (next_starts_at, next_source) = self.source_at_next_header(source_index, header.number); - if next_starts_at == header.number { - match *next_source { - ValidatorsSource::List(ref new_list) => return Ok((None, Some(new_list.clone()))), - ValidatorsSource::Contract(_, ref new_list) => - return Ok((Some(new_list.clone()), None)), - } - } - - // else deal with previous source - // - // if we are taking validators set from the fixed list, there's always - // single epoch - // => we never require transactions receipts - let contract_address = match source { - ValidatorsSource::List(_) => return Ok((None, None)), - ValidatorsSource::Contract(contract_address, _) => contract_address, - }; - - // else we need to check logs bloom and if it has required bits set, it means - // that the contract has (probably) emitted epoch change event - let expected_bloom = LogEntry { - address: *contract_address, - topics: vec![CHANGE_EVENT_HASH.into(), header.parent_hash], - data: Vec::new(), // irrelevant for bloom. 
- } - .bloom(); - - if !header.log_bloom.contains(&expected_bloom) { - return Ok((None, None)) - } - - let receipts = receipts.ok_or(Error::MissingTransactionsReceipts)?; - #[allow(clippy::question_mark)] - if header.check_receipts_root(&receipts).is_err() { - return Err(Error::TransactionsReceiptsMismatch) - } - - // iterate in reverse because only the _last_ change in a given - // block actually has any effect - Ok(( - receipts - .iter() - .rev() - .filter(|r| r.log_bloom.contains(&expected_bloom)) - .flat_map(|r| r.logs.iter()) - .filter(|l| { - l.address == *contract_address && - l.topics.len() == 2 && l.topics[0].as_fixed_bytes() == CHANGE_EVENT_HASH && - l.topics[1] == header.parent_hash - }) - .filter_map(|l| { - let data_len = l.data.len(); - if data_len < 64 { - return None - } - - let new_validators_len_u256 = U256::from_big_endian(&l.data[32..64]); - let new_validators_len = new_validators_len_u256.low_u64(); - if new_validators_len_u256 != new_validators_len.into() { - return None - } - - if (data_len - 64) as u64 != new_validators_len.saturating_mul(32) { - return None - } - - Some( - l.data[64..] - .chunks(32) - .map(|chunk| { - let mut new_validator = Address::default(); - new_validator.as_mut().copy_from_slice(&chunk[12..32]); - new_validator - }) - .collect(), - ) - }) - .next(), - None, - )) - } - - /// Finalize changes when blocks are finalized. - pub fn finalize_validators_change( - &self, - storage: &S, - finalized_blocks: &[(HeaderId, Option)], - ) -> Option { - // if we haven't finalized any blocks, no changes may be finalized - let newest_finalized_id = match finalized_blocks.last().map(|(id, _)| id) { - Some(last_finalized_id) => last_finalized_id, - None => return None, - }; - let oldest_finalized_id = finalized_blocks - .first() - .map(|(id, _)| id) - .expect("finalized_blocks is not empty; qed"); - - // try to directly go to the header that has scheduled last change - // - // if we're unable to create import context for some block, it means - // that the header has already been pruned => it and its ancestors had - // no scheduled changes - // - // if we're unable to find scheduled changes for some block, it means - // that these changes have been finalized already - storage - .import_context(None, &newest_finalized_id.hash) - .and_then(|context| context.last_signal_block()) - .and_then(|signal_block| { - if signal_block.number >= oldest_finalized_id.number { - Some(signal_block) - } else { - None - } - }) - .and_then(|signal_block| { - storage.scheduled_change(&signal_block.hash).map(|change| ChangeToEnact { - signal_block: Some(signal_block), - validators: change.validators, - }) - }) - } - - /// Returns source of validators that should author the header. - fn source_at(&self, header_number: u64) -> (usize, u64, &ValidatorsSource) { - match self.config { - ValidatorsConfiguration::Single(ref source) => (0, 0, source), - ValidatorsConfiguration::Multi(ref sources) => sources - .iter() - .rev() - .enumerate() - .find(|(_, &(begin, _))| begin < header_number) - .map(|(i, (begin, source))| (sources.len() - 1 - i, *begin, source)) - .expect( - "there's always entry for the initial block;\ - we do not touch any headers with number < initial block number; qed", - ), - } - } - - /// Returns source of validators that should author the next header. 
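The filtering above parses the ABI-encoded address array carried in the `InitiateChange` log data: bytes 32..64 hold the array length and each address occupies the last 20 bytes of its own 32-byte word. Below is a self-contained sketch of that layout using a hypothetical `parse_new_validators` helper; it is not the pallet's API, just an illustration of the same bounds and length checks.

use std::convert::TryInto;

/// Hypothetical parser mirroring the log-data checks above: reject short data,
/// oversized length words and payloads that are not exactly `len` 32-byte words.
fn parse_new_validators(data: &[u8]) -> Option<Vec<[u8; 20]>> {
    if data.len() < 64 {
        return None;
    }
    // the length word lives in bytes 32..64; only small values are accepted
    if data[32..56].iter().any(|b| *b != 0) {
        return None;
    }
    let len = u64::from_be_bytes(data[56..64].try_into().ok()?) as usize;
    if data.len() - 64 != len * 32 {
        return None;
    }
    Some(
        data[64..]
            .chunks(32)
            // an address is the last 20 bytes of each 32-byte word
            .map(|word| word[12..32].try_into().expect("chunk is 32 bytes; qed"))
            .collect(),
    )
}

fn main() {
    // one validator: a length word of 1 followed by one 32-byte word whose
    // last 20 bytes carry the address (0x0707…07, the address the
    // validators-change tests below expect)
    let mut data = vec![0u8; 64];
    data[63] = 1;
    data.extend_from_slice(&[0u8; 12]);
    data.extend_from_slice(&[7u8; 20]);
    assert_eq!(parse_new_validators(&data), Some(vec![[7u8; 20]]));
    // truncated payloads are rejected
    assert_eq!(parse_new_validators(&data[..80]), None);
}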
- fn source_at_next_header( - &self, - header_source_index: usize, - header_number: u64, - ) -> (u64, &ValidatorsSource) { - match self.config { - ValidatorsConfiguration::Single(ref source) => (0, source), - ValidatorsConfiguration::Multi(ref sources) => { - let next_source_index = header_source_index + 1; - if next_source_index < sources.len() { - let next_source = &sources[next_source_index]; - if next_source.0 < header_number + 1 { - return (next_source.0, &next_source.1) - } - } - - let source = &sources[header_source_index]; - (source.0, &source.1) - }, - } - } -} - -impl ValidatorsSource { - /// Returns initial validators set. - pub fn initial_epoch_validators(&self) -> Vec
{ - match self { - ValidatorsSource::List(ref list) => list.clone(), - ValidatorsSource::Contract(_, ref list) => list.clone(), - } - } -} - -#[cfg(test)] -pub(crate) mod tests { - use super::*; - use crate::{ - mock::{run_test, validators_addresses, validators_change_receipt, TestRuntime}, - AuraScheduledChange, BridgeStorage, Headers, ScheduledChanges, StoredHeader, - }; - use bp_eth_poa::compute_merkle_root; - - const TOTAL_VALIDATORS: usize = 3; - - #[test] - fn source_at_works() { - let config = ValidatorsConfiguration::Multi(vec![ - (0, ValidatorsSource::List(vec![[1; 20].into()])), - (100, ValidatorsSource::List(vec![[2; 20].into()])), - (200, ValidatorsSource::Contract([3; 20].into(), vec![[3; 20].into()])), - ]); - let validators = Validators::new(&config); - - assert_eq!(validators.source_at(99), (0, 0, &ValidatorsSource::List(vec![[1; 20].into()])),); - assert_eq!( - validators.source_at_next_header(0, 99), - (0, &ValidatorsSource::List(vec![[1; 20].into()])), - ); - - assert_eq!( - validators.source_at(100), - (0, 0, &ValidatorsSource::List(vec![[1; 20].into()])), - ); - assert_eq!( - validators.source_at_next_header(0, 100), - (100, &ValidatorsSource::List(vec![[2; 20].into()])), - ); - - assert_eq!( - validators.source_at(200), - (1, 100, &ValidatorsSource::List(vec![[2; 20].into()])), - ); - assert_eq!( - validators.source_at_next_header(1, 200), - (200, &ValidatorsSource::Contract([3; 20].into(), vec![[3; 20].into()])), - ); - } - - #[test] - fn maybe_signals_validators_change_works() { - // when contract is active, but bloom has no required bits set - let config = ValidatorsConfiguration::Single(ValidatorsSource::Contract( - Default::default(), - Vec::new(), - )); - let validators = Validators::new(&config); - let mut header = AuraHeader { number: u64::max_value(), ..Default::default() }; - assert!(!validators.maybe_signals_validators_change(&header)); - - // when contract is active and bloom has required bits set - header.log_bloom = (&[0xff; 256]).into(); - assert!(validators.maybe_signals_validators_change(&header)); - - // when list is active and bloom has required bits set - let config = ValidatorsConfiguration::Single(ValidatorsSource::List(vec![[42; 20].into()])); - let validators = Validators::new(&config); - assert!(!validators.maybe_signals_validators_change(&header)); - } - - #[test] - fn extract_validators_change_works() { - let config = ValidatorsConfiguration::Multi(vec![ - (0, ValidatorsSource::List(vec![[1; 20].into()])), - (100, ValidatorsSource::List(vec![[2; 20].into()])), - (200, ValidatorsSource::Contract([3; 20].into(), vec![[3; 20].into()])), - ]); - let validators = Validators::new(&config); - let mut header = AuraHeader { number: 100, ..Default::default() }; - - // when we're at the block that switches to list source - assert_eq!( - validators.extract_validators_change(&header, None), - Ok((None, Some(vec![[2; 20].into()]))), - ); - - // when we're inside list range - header.number = 150; - assert_eq!(validators.extract_validators_change(&header, None), Ok((None, None)),); - - // when we're at the block that switches to contract source - header.number = 200; - assert_eq!( - validators.extract_validators_change(&header, None), - Ok((Some(vec![[3; 20].into()]), None)), - ); - - // when we're inside contract range and logs bloom signals change - // but we have no receipts - header.number = 250; - header.log_bloom = (&[0xff; 256]).into(); - assert_eq!( - validators.extract_validators_change(&header, None), - Err(Error::MissingTransactionsReceipts), - 
); - - // when we're inside contract range and logs bloom signals change - // but there's no change in receipts - header.receipts_root = "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" - .parse() - .unwrap(); - assert_eq!( - validators.extract_validators_change(&header, Some(Vec::new())), - Ok((None, None)), - ); - - // when we're inside contract range and logs bloom signals change - // and there's change in receipts - let receipts = vec![validators_change_receipt(Default::default())]; - header.receipts_root = compute_merkle_root(receipts.iter().map(|r| r.rlp())); - assert_eq!( - validators.extract_validators_change(&header, Some(receipts)), - Ok((Some(vec![[7; 20].into()]), None)), - ); - - // when incorrect receipts root passed - assert_eq!( - validators.extract_validators_change(&header, Some(Vec::new())), - Err(Error::TransactionsReceiptsMismatch), - ); - } - - fn try_finalize_with_scheduled_change(scheduled_at: Option) -> Option { - run_test(TOTAL_VALIDATORS, |_| { - let config = ValidatorsConfiguration::Single(ValidatorsSource::Contract( - Default::default(), - Vec::new(), - )); - let validators = Validators::new(&config); - let storage = BridgeStorage::::new(); - - // when we're finailizing blocks 10...100 - let id10 = HeaderId { number: 10, hash: [10; 32].into() }; - let id100 = HeaderId { number: 100, hash: [100; 32].into() }; - let finalized_blocks = vec![(id10, None), (id100, None)]; - let header100 = StoredHeader:: { - submitter: None, - header: AuraHeader { number: 100, ..Default::default() }, - total_difficulty: 0.into(), - next_validators_set_id: 0, - last_signal_block: scheduled_at, - }; - let scheduled_change = AuraScheduledChange { - validators: validators_addresses(1), - prev_signal_block: None, - }; - Headers::::insert(id100.hash, header100); - if let Some(scheduled_at) = scheduled_at { - ScheduledChanges::::insert(scheduled_at.hash, scheduled_change); - } - - validators.finalize_validators_change(&storage, &finalized_blocks) - }) - } - - #[test] - fn finalize_validators_change_finalizes_scheduled_change() { - let id50 = HeaderId { number: 50, ..Default::default() }; - assert_eq!( - try_finalize_with_scheduled_change(Some(id50)), - Some(ChangeToEnact { signal_block: Some(id50), validators: validators_addresses(1) }), - ); - } - - #[test] - fn finalize_validators_change_does_not_finalize_when_changes_are_not_scheduled() { - assert_eq!(try_finalize_with_scheduled_change(None), None,); - } - - #[test] - fn finalize_validators_change_does_not_finalize_changes_when_they_are_outside_of_range() { - let id5 = HeaderId { number: 5, ..Default::default() }; - assert_eq!(try_finalize_with_scheduled_change(Some(id5)), None,); - } -} diff --git a/bridges/modules/ethereum/src/verification.rs b/bridges/modules/ethereum/src/verification.rs deleted file mode 100644 index 053ce4d0fea4..000000000000 --- a/bridges/modules/ethereum/src/verification.rs +++ /dev/null @@ -1,972 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::{ - error::Error, - validators::{Validators, ValidatorsConfiguration}, - AuraConfiguration, AuraScheduledChange, ChainTime, ImportContext, PoolConfiguration, Storage, -}; -use bp_eth_poa::{ - public_to_address, step_validator, Address, AuraHeader, HeaderId, Receipt, SealedEmptyStep, - H256, H520, U128, U256, -}; -use codec::Encode; -use sp_io::crypto::secp256k1_ecdsa_recover; -use sp_runtime::transaction_validity::TransactionTag; -use sp_std::{vec, vec::Vec}; - -/// Pre-check to see if should try and import this header. -/// Returns error if we should not try to import this block. -/// Returns ID of passed header and best finalized header. -pub fn is_importable_header( - storage: &S, - header: &AuraHeader, -) -> Result<(HeaderId, HeaderId), Error> { - // we never import any header that competes with finalized header - let finalized_id = storage.finalized_block(); - if header.number <= finalized_id.number { - return Err(Error::AncientHeader) - } - // we never import any header with known hash - let id = header.compute_id(); - if storage.header(&id.hash).is_some() { - return Err(Error::KnownHeader) - } - - Ok((id, finalized_id)) -} - -/// Try to accept unsigned aura header into transaction pool. -/// -/// Returns required and provided tags. -pub fn accept_aura_header_into_pool( - storage: &S, - config: &AuraConfiguration, - validators_config: &ValidatorsConfiguration, - pool_config: &PoolConfiguration, - header: &AuraHeader, - chain_time: &CT, - receipts: Option<&Vec>, -) -> Result<(Vec, Vec), Error> { - // check if we can verify further - let (header_id, _) = is_importable_header(storage, header)?; - - // we can always do contextless checks - contextless_checks(config, header, chain_time)?; - - // we want to avoid having same headers twice in the pool - // => we're strict about receipts here - if we need them, we require receipts to be Some, - // otherwise we require receipts to be None - let receipts_required = - Validators::new(validators_config).maybe_signals_validators_change(header); - match (receipts_required, receipts.is_some()) { - (true, false) => return Err(Error::MissingTransactionsReceipts), - (false, true) => return Err(Error::RedundantTransactionsReceipts), - _ => (), - } - - // we do not want to have all future headers in the pool at once - // => if we see header with number > maximal ever seen header number + LIMIT, - // => we consider this transaction invalid, but only at this moment (we do not want to ban it) - // => let's mark it as Unknown transaction - let (best_id, _) = storage.best_block(); - let difference = header.number.saturating_sub(best_id.number); - if difference > pool_config.max_future_number_difference { - return Err(Error::UnsignedTooFarInTheFuture) - } - - // TODO: only accept new headers when we're at the tip of PoA chain - // https://github.com/paritytech/parity-bridges-common/issues/38 - - // we want to see at most one header with given number from single authority - // => every header is providing tag (block_number + authority) - // => since only one tx in the pool can provide the same tag, they're auto-deduplicated - let provides_number_and_authority_tag = (header.number, header.author).encode(); - - // we want to see several 'future' headers in the pool at once, but we may not have access to - // previous headers here - // => we can at least 'verify' that 
headers comprise a chain by providing and requiring - // tag (header.number, header.hash) - let provides_header_number_and_hash_tag = header_id.encode(); - - // depending on whether parent header is available, we either perform full or 'shortened' check - let context = storage.import_context(None, &header.parent_hash); - let tags = match context { - Some(context) => { - let header_step = contextual_checks(config, &context, None, header)?; - validator_checks(config, &context.validators_set().validators, header, header_step)?; - - // since our parent is already in the storage, we do not require it - // to be in the transaction pool - (vec![], vec![provides_number_and_authority_tag, provides_header_number_and_hash_tag]) - }, - None => { - // we know nothing about parent header - // => the best thing we can do is to believe that there are no forks in - // PoA chain AND that the header is produced either by previous, or next - // scheduled validators set change - let header_step = header.step().ok_or(Error::MissingStep)?; - let best_context = storage.import_context(None, &best_id.hash).expect( - "import context is None only when header is missing from the storage;\ - best header is always in the storage; qed", - ); - let validators_check_result = validator_checks( - config, - &best_context.validators_set().validators, - header, - header_step, - ); - if let Err(error) = validators_check_result { - find_next_validators_signal(storage, &best_context).ok_or(error).and_then( - |next_validators| { - validator_checks(config, &next_validators, header, header_step) - }, - )?; - } - - // since our parent is missing from the storage, we **DO** require it - // to be in the transaction pool - // (- 1 can't underflow because there's always best block in the header) - let requires_header_number_and_hash_tag = - HeaderId { number: header.number - 1, hash: header.parent_hash }.encode(); - ( - vec![requires_header_number_and_hash_tag], - vec![provides_number_and_authority_tag, provides_header_number_and_hash_tag], - ) - }, - }; - - // the heaviest, but rare operation - we do not want invalid receipts in the pool - if let Some(receipts) = receipts { - log::trace!(target: "runtime", "Got receipts! {:?}", receipts); - #[allow(clippy::question_mark)] - if header.check_receipts_root(receipts).is_err() { - return Err(Error::TransactionsReceiptsMismatch) - } - } - - Ok(tags) -} - -/// Verify header by Aura rules. -pub fn verify_aura_header( - storage: &S, - config: &AuraConfiguration, - submitter: Option, - header: &AuraHeader, - chain_time: &CT, -) -> Result, Error> { - // let's do the lightest check first - contextless_checks(config, header, chain_time)?; - - // the rest of checks requires access to the parent header - let context = storage.import_context(submitter, &header.parent_hash).ok_or_else(|| { - log::warn!( - target: "runtime", - "Missing parent PoA block: ({:?}, {})", - header.number.checked_sub(1), - header.parent_hash, - ); - - Error::MissingParentBlock - })?; - let header_step = contextual_checks(config, &context, None, header)?; - validator_checks(config, &context.validators_set().validators, header, header_step)?; - - Ok(context) -} - -/// Perform basic checks that only require header itself. 
-fn contextless_checks( - config: &AuraConfiguration, - header: &AuraHeader, - chain_time: &CT, -) -> Result<(), Error> { - let expected_seal_fields = expected_header_seal_fields(config, header); - if header.seal.len() != expected_seal_fields { - return Err(Error::InvalidSealArity) - } - if header.number >= u64::max_value() { - return Err(Error::RidiculousNumber) - } - if header.gas_used > header.gas_limit { - return Err(Error::TooMuchGasUsed) - } - if header.gas_limit < config.min_gas_limit { - return Err(Error::InvalidGasLimit) - } - if header.gas_limit > config.max_gas_limit { - return Err(Error::InvalidGasLimit) - } - if header.number != 0 && header.extra_data.len() as u64 > config.maximum_extra_data_size { - return Err(Error::ExtraDataOutOfBounds) - } - - // we can't detect if block is from future in runtime - // => let's only do an overflow check - if header.timestamp > i32::max_value() as u64 { - return Err(Error::TimestampOverflow) - } - - if chain_time.is_timestamp_ahead(header.timestamp) { - return Err(Error::HeaderTimestampIsAhead) - } - - Ok(()) -} - -/// Perform checks that require access to parent header. -fn contextual_checks( - config: &AuraConfiguration, - context: &ImportContext, - validators_override: Option<&[Address]>, - header: &AuraHeader, -) -> Result { - let validators = validators_override.unwrap_or_else(|| &context.validators_set().validators); - let header_step = header.step().ok_or(Error::MissingStep)?; - let parent_step = context.parent_header().step().ok_or(Error::MissingStep)?; - - // Ensure header is from the step after context. - if header_step == parent_step { - return Err(Error::DoubleVote) - } - #[allow(clippy::suspicious_operation_groupings)] - if header.number >= config.validate_step_transition && header_step < parent_step { - return Err(Error::DoubleVote) - } - - // If empty step messages are enabled we will validate the messages in the seal, missing - // messages are not reported as there's no way to tell whether the empty step message was never - // sent or simply not included. - let empty_steps_len = match header.number >= config.empty_steps_transition { - true => { - let strict_empty_steps = header.number >= config.strict_empty_steps_transition; - let empty_steps = header.empty_steps().ok_or(Error::MissingEmptySteps)?; - let empty_steps_len = empty_steps.len(); - let mut prev_empty_step = 0; - - for empty_step in empty_steps { - if empty_step.step <= parent_step || empty_step.step >= header_step { - return Err(Error::InsufficientProof) - } - - if !verify_empty_step(&header.parent_hash, &empty_step, validators) { - return Err(Error::InsufficientProof) - } - - if strict_empty_steps { - if empty_step.step <= prev_empty_step { - return Err(Error::InsufficientProof) - } - - prev_empty_step = empty_step.step; - } - } - - empty_steps_len - }, - false => 0, - }; - - // Validate chain score. - if header.number >= config.validate_score_transition { - let expected_difficulty = calculate_score(parent_step, header_step, empty_steps_len as _); - if header.difficulty != expected_difficulty { - return Err(Error::InvalidDifficulty) - } - } - - Ok(header_step) -} - -/// Check that block is produced by expected validator. 
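The author and difficulty checks build on two simple rules. The sketch below is a rough, self-contained illustration rather than the pallet's code: `expected_author` and `score` are hypothetical helpers, `u128` stands in for the pallet's `U256`, the round-robin rule corresponds to `step_validator`, and the scoring rule mirrors `calculate_score` (used by `contextual_checks` above and defined further below).

/// Round-robin author selection: the validator at index `step % len` is the
/// one expected to author the header produced at `step`.
fn expected_author(validators: &[[u8; 20]], step: u64) -> [u8; 20] {
    validators[(step % validators.len() as u64) as usize]
}

/// Chain scoring sketch: a large per-block base, reduced by the step gap to
/// the parent and partially recovered by included empty-step proofs.
fn score(parent_step: u64, current_step: u64, empty_steps: u64) -> u128 {
    // stand-in base; the pallet uses U256::from(U128::max_value())
    const BASE: u128 = u128::MAX / 2;
    BASE + u128::from(parent_step) - u128::from(current_step) + u128::from(empty_steps)
}

fn main() {
    let validators = [[1u8; 20], [2u8; 20], [3u8; 20]];
    // steps walk the validator list round-robin
    assert_eq!(expected_author(&validators, 0), [1u8; 20]);
    assert_eq!(expected_author(&validators, 4), [2u8; 20]);

    // producing at the very next step scores higher than skipping steps,
    // and empty-step proofs recover part of the lost score
    assert!(score(10, 11, 0) > score(10, 13, 0));
    assert!(score(10, 13, 2) > score(10, 13, 0));
}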
-fn validator_checks( - config: &AuraConfiguration, - validators: &[Address], - header: &AuraHeader, - header_step: u64, -) -> Result<(), Error> { - let expected_validator = *step_validator(validators, header_step); - if header.author != expected_validator { - return Err(Error::NotValidator) - } - - let validator_signature = header.signature().ok_or(Error::MissingSignature)?; - let header_seal_hash = header - .seal_hash(header.number >= config.empty_steps_transition) - .ok_or(Error::MissingEmptySteps)?; - let is_invalid_proposer = - !verify_signature(&expected_validator, &validator_signature, &header_seal_hash); - if is_invalid_proposer { - return Err(Error::NotValidator) - } - - Ok(()) -} - -/// Returns expected number of seal fields in the header. -fn expected_header_seal_fields(config: &AuraConfiguration, header: &AuraHeader) -> usize { - if header.number != u64::max_value() && header.number >= config.empty_steps_transition { - 3 - } else { - 2 - } -} - -/// Verify single sealed empty step. -fn verify_empty_step(parent_hash: &H256, step: &SealedEmptyStep, validators: &[Address]) -> bool { - let expected_validator = *step_validator(validators, step.step); - let message = step.message(parent_hash); - verify_signature(&expected_validator, &step.signature, &message) -} - -/// Chain scoring: total `weight is sqrt(U256::max_value())*height - step`. -pub(crate) fn calculate_score( - parent_step: u64, - current_step: u64, - current_empty_steps: usize, -) -> U256 { - U256::from(U128::max_value()) + U256::from(parent_step) - U256::from(current_step) + - U256::from(current_empty_steps) -} - -/// Verify that the signature over message has been produced by given validator. -fn verify_signature(expected_validator: &Address, signature: &H520, message: &H256) -> bool { - secp256k1_ecdsa_recover(signature.as_fixed_bytes(), message.as_fixed_bytes()) - .map(|public| public_to_address(&public)) - .map(|address| *expected_validator == address) - .unwrap_or(false) -} - -/// Find next unfinalized validators set change after finalized set. 
-fn find_next_validators_signal( - storage: &S, - context: &ImportContext, -) -> Option> { - // that's the earliest block number we may met in following loop - // it may be None if that's the first set - let best_set_signal_block = context.validators_set().signal_block; - - // if parent schedules validators set change, then it may be our set - // else we'll start with last known change - let mut current_set_signal_block = context.last_signal_block(); - let mut next_scheduled_set: Option = None; - - loop { - // if we have reached block that signals finalized change, then - // next_current_block_hash points to the block that schedules next - // change - let current_scheduled_set = match current_set_signal_block { - Some(current_set_signal_block) - if Some(¤t_set_signal_block) == best_set_signal_block.as_ref() => - return next_scheduled_set.map(|scheduled_set| scheduled_set.validators), - None => return next_scheduled_set.map(|scheduled_set| scheduled_set.validators), - Some(current_set_signal_block) => - storage.scheduled_change(¤t_set_signal_block.hash).expect( - "header that is associated with this change is not pruned;\ - scheduled changes are only removed when header is pruned; qed", - ), - }; - - current_set_signal_block = current_scheduled_set.prev_signal_block; - next_scheduled_set = Some(current_scheduled_set); - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - mock::{ - insert_header, run_test_with_genesis, test_aura_config, validator, validator_address, - validators_addresses, validators_change_receipt, AccountId, ConstChainTime, - HeaderBuilder, TestRuntime, GAS_LIMIT, - }, - pool_configuration, - validators::ValidatorsSource, - BridgeStorage, FinalizedBlock, Headers, HeadersByNumber, NextValidatorsSetId, - ScheduledChanges, ValidatorsSet, ValidatorsSets, - }; - use bp_eth_poa::{compute_merkle_root, rlp_encode, TransactionOutcome, H520, U256}; - use hex_literal::hex; - use libsecp256k1::SecretKey; - use sp_runtime::transaction_validity::TransactionTag; - - const GENESIS_STEP: u64 = 42; - const TOTAL_VALIDATORS: usize = 3; - - fn genesis() -> AuraHeader { - HeaderBuilder::genesis().step(GENESIS_STEP).sign_by(&validator(0)) - } - - fn verify_with_config( - config: &AuraConfiguration, - header: &AuraHeader, - ) -> Result, Error> { - run_test_with_genesis(genesis(), TOTAL_VALIDATORS, |_| { - let storage = BridgeStorage::::new(); - verify_aura_header(&storage, config, None, header, &ConstChainTime::default()) - }) - } - - fn default_verify(header: &AuraHeader) -> Result, Error> { - verify_with_config(&test_aura_config(), header) - } - - fn default_accept_into_pool( - mut make_header: impl FnMut(&[SecretKey]) -> (AuraHeader, Option>), - ) -> Result<(Vec, Vec), Error> { - run_test_with_genesis(genesis(), TOTAL_VALIDATORS, |_| { - let validators = vec![validator(0), validator(1), validator(2)]; - let mut storage = BridgeStorage::::new(); - let block1 = HeaderBuilder::with_parent_number(0).sign_by_set(&validators); - insert_header(&mut storage, block1); - let block2 = HeaderBuilder::with_parent_number(1).sign_by_set(&validators); - let block2_id = block2.compute_id(); - insert_header(&mut storage, block2); - let block3 = HeaderBuilder::with_parent_number(2).sign_by_set(&validators); - insert_header(&mut storage, block3); - - FinalizedBlock::::put(block2_id); - - let validators_config = ValidatorsConfiguration::Single(ValidatorsSource::Contract( - Default::default(), - Vec::new(), - )); - let (header, receipts) = make_header(&validators); - accept_aura_header_into_pool( - 
-				&storage,
-				&test_aura_config(),
-				&validators_config,
-				&pool_configuration(),
-				&header,
-				&(),
-				receipts.as_ref(),
-			)
-		})
-	}
-
-	fn change_validators_set_at(
-		number: u64,
-		finalized_set: Vec<Address>
, - signalled_set: Option>, - ) { - let set_id = NextValidatorsSetId::::get(); - NextValidatorsSetId::::put(set_id + 1); - ValidatorsSets::::insert( - set_id, - ValidatorsSet { - validators: finalized_set, - signal_block: None, - enact_block: HeaderId { - number: 0, - hash: HeadersByNumber::::get(&0).unwrap()[0], - }, - }, - ); - - let header_hash = HeadersByNumber::::get(&number).unwrap()[0]; - let mut header = Headers::::get(&header_hash).unwrap(); - header.next_validators_set_id = set_id; - if let Some(signalled_set) = signalled_set { - header.last_signal_block = Some(HeaderId { - number: header.header.number - 1, - hash: header.header.parent_hash, - }); - ScheduledChanges::::insert( - header.header.parent_hash, - AuraScheduledChange { validators: signalled_set, prev_signal_block: None }, - ); - } - - Headers::::insert(header_hash, header); - } - - #[test] - fn verifies_seal_count() { - // when there are no seals at all - let mut header = AuraHeader::default(); - assert_eq!(default_verify(&header), Err(Error::InvalidSealArity)); - - // when there's single seal (we expect 2 or 3 seals) - header.seal = vec![vec![]]; - assert_eq!(default_verify(&header), Err(Error::InvalidSealArity)); - - // when there's 3 seals (we expect 2 by default) - header.seal = vec![vec![], vec![], vec![]]; - assert_eq!(default_verify(&header), Err(Error::InvalidSealArity)); - - // when there's 2 seals - header.seal = vec![vec![], vec![]]; - assert_ne!(default_verify(&header), Err(Error::InvalidSealArity)); - } - - #[test] - fn verifies_header_number() { - // when number is u64::max_value() - let header = HeaderBuilder::with_number(u64::max_value()).sign_by(&validator(0)); - assert_eq!(default_verify(&header), Err(Error::RidiculousNumber)); - - // when header is < u64::max_value() - let header = HeaderBuilder::with_number(u64::max_value() - 1).sign_by(&validator(0)); - assert_ne!(default_verify(&header), Err(Error::RidiculousNumber)); - } - - #[test] - fn verifies_gas_used() { - // when gas used is larger than gas limit - let header = HeaderBuilder::with_number(1) - .gas_used((GAS_LIMIT + 1).into()) - .sign_by(&validator(0)); - assert_eq!(default_verify(&header), Err(Error::TooMuchGasUsed)); - - // when gas used is less than gas limit - let header = HeaderBuilder::with_number(1) - .gas_used((GAS_LIMIT - 1).into()) - .sign_by(&validator(0)); - assert_ne!(default_verify(&header), Err(Error::TooMuchGasUsed)); - } - - #[test] - fn verifies_gas_limit() { - let mut config = test_aura_config(); - config.min_gas_limit = 100.into(); - config.max_gas_limit = 200.into(); - - // when limit is lower than expected - let header = HeaderBuilder::with_number(1).gas_limit(50.into()).sign_by(&validator(0)); - assert_eq!(verify_with_config(&config, &header), Err(Error::InvalidGasLimit)); - - // when limit is larger than expected - let header = HeaderBuilder::with_number(1).gas_limit(250.into()).sign_by(&validator(0)); - assert_eq!(verify_with_config(&config, &header), Err(Error::InvalidGasLimit)); - - // when limit is within expected range - let header = HeaderBuilder::with_number(1).gas_limit(150.into()).sign_by(&validator(0)); - assert_ne!(verify_with_config(&config, &header), Err(Error::InvalidGasLimit)); - } - - #[test] - fn verifies_extra_data_len() { - // when extra data is too large - let header = HeaderBuilder::with_number(1) - .extra_data(std::iter::repeat(42).take(1000).collect::>()) - .sign_by(&validator(0)); - assert_eq!(default_verify(&header), Err(Error::ExtraDataOutOfBounds)); - - // when extra data size is OK - let 
header = HeaderBuilder::with_number(1) - .extra_data(std::iter::repeat(42).take(10).collect::>()) - .sign_by(&validator(0)); - assert_ne!(default_verify(&header), Err(Error::ExtraDataOutOfBounds)); - } - - #[test] - fn verifies_timestamp() { - // when timestamp overflows i32 - let header = HeaderBuilder::with_number(1) - .timestamp(i32::max_value() as u64 + 1) - .sign_by(&validator(0)); - assert_eq!(default_verify(&header), Err(Error::TimestampOverflow)); - - // when timestamp doesn't overflow i32 - let header = HeaderBuilder::with_number(1) - .timestamp(i32::max_value() as u64) - .sign_by(&validator(0)); - assert_ne!(default_verify(&header), Err(Error::TimestampOverflow)); - } - - #[test] - fn verifies_chain_time() { - // expected import context after verification - let expect = ImportContext:: { - submitter: None, - parent_hash: hex!("6e41bff05578fc1db17f6816117969b07d2217f1f9039d8116a82764335991d3") - .into(), - parent_header: genesis(), - parent_total_difficulty: U256::zero(), - parent_scheduled_change: None, - validators_set_id: 0, - validators_set: ValidatorsSet { - validators: vec![ - hex!("dc5b20847f43d67928f49cd4f85d696b5a7617b5").into(), - hex!("897df33a7b3c62ade01e22c13d48f98124b4480f").into(), - hex!("05c987b34c6ef74e0c7e69c6e641120c24164c2d").into(), - ], - signal_block: None, - enact_block: HeaderId { - number: 0, - hash: hex!("6e41bff05578fc1db17f6816117969b07d2217f1f9039d8116a82764335991d3") - .into(), - }, - }, - last_signal_block: None, - }; - - // header is behind - let header = HeaderBuilder::with_parent(&genesis()) - .timestamp(i32::max_value() as u64 / 2 - 100) - .sign_by(&validator(1)); - assert_eq!(default_verify(&header).unwrap(), expect); - - // header is ahead - let header = HeaderBuilder::with_parent(&genesis()) - .timestamp(i32::max_value() as u64 / 2 + 100) - .sign_by(&validator(1)); - assert_eq!(default_verify(&header), Err(Error::HeaderTimestampIsAhead)); - - // header has same timestamp as ConstChainTime - let header = HeaderBuilder::with_parent(&genesis()) - .timestamp(i32::max_value() as u64 / 2) - .sign_by(&validator(1)); - assert_eq!(default_verify(&header).unwrap(), expect); - } - - #[test] - fn verifies_parent_existence() { - // when there's no parent in the storage - let header = HeaderBuilder::with_number(1).sign_by(&validator(0)); - assert_eq!(default_verify(&header), Err(Error::MissingParentBlock)); - - // when parent is in the storage - let header = HeaderBuilder::with_parent(&genesis()).sign_by(&validator(0)); - assert_ne!(default_verify(&header), Err(Error::MissingParentBlock)); - } - - #[test] - fn verifies_step() { - // when step is missing from seals - let mut header = AuraHeader { - seal: vec![vec![], vec![]], - gas_limit: test_aura_config().min_gas_limit, - parent_hash: genesis().compute_hash(), - ..Default::default() - }; - assert_eq!(default_verify(&header), Err(Error::MissingStep)); - - // when step is the same as for the parent block - header.seal[0] = rlp_encode(&42u64).to_vec(); - assert_eq!(default_verify(&header), Err(Error::DoubleVote)); - - // when step is OK - header.seal[0] = rlp_encode(&43u64).to_vec(); - assert_ne!(default_verify(&header), Err(Error::DoubleVote)); - - // now check with validate_step check enabled - let mut config = test_aura_config(); - config.validate_step_transition = 0; - - // when step is lesser that for the parent block - header.seal[0] = rlp_encode(&40u64).to_vec(); - header.seal = vec![vec![40], vec![]]; - assert_eq!(verify_with_config(&config, &header), Err(Error::DoubleVote)); - - // when step is OK 
- header.seal[0] = rlp_encode(&44u64).to_vec(); - assert_ne!(verify_with_config(&config, &header), Err(Error::DoubleVote)); - } - - #[test] - fn verifies_empty_step() { - let mut config = test_aura_config(); - config.empty_steps_transition = 0; - - // when empty step duplicates parent step - let header = HeaderBuilder::with_parent(&genesis()) - .empty_steps(&[(&validator(0), GENESIS_STEP)]) - .step(GENESIS_STEP + 3) - .sign_by(&validator(3)); - assert_eq!(verify_with_config(&config, &header), Err(Error::InsufficientProof)); - - // when empty step signature check fails - let header = HeaderBuilder::with_parent(&genesis()) - .empty_steps(&[(&validator(100), GENESIS_STEP + 1)]) - .step(GENESIS_STEP + 3) - .sign_by(&validator(3)); - assert_eq!(verify_with_config(&config, &header), Err(Error::InsufficientProof)); - - // when we are accepting strict empty steps and they come not in order - config.strict_empty_steps_transition = 0; - let header = HeaderBuilder::with_parent(&genesis()) - .empty_steps(&[(&validator(2), GENESIS_STEP + 2), (&validator(1), GENESIS_STEP + 1)]) - .step(GENESIS_STEP + 3) - .sign_by(&validator(3)); - assert_eq!(verify_with_config(&config, &header), Err(Error::InsufficientProof)); - - // when empty steps are OK - let header = HeaderBuilder::with_parent(&genesis()) - .empty_steps(&[(&validator(1), GENESIS_STEP + 1), (&validator(2), GENESIS_STEP + 2)]) - .step(GENESIS_STEP + 3) - .sign_by(&validator(3)); - assert_ne!(verify_with_config(&config, &header), Err(Error::InsufficientProof)); - } - - #[test] - fn verifies_chain_score() { - let mut config = test_aura_config(); - config.validate_score_transition = 0; - - // when chain score is invalid - let header = HeaderBuilder::with_parent(&genesis()) - .difficulty(100.into()) - .sign_by(&validator(0)); - assert_eq!(verify_with_config(&config, &header), Err(Error::InvalidDifficulty)); - - // when chain score is accepted - let header = HeaderBuilder::with_parent(&genesis()).sign_by(&validator(0)); - assert_ne!(verify_with_config(&config, &header), Err(Error::InvalidDifficulty)); - } - - #[test] - fn verifies_validator() { - let good_header = HeaderBuilder::with_parent(&genesis()).sign_by(&validator(1)); - - // when header author is invalid - let mut header = good_header.clone(); - header.author = Default::default(); - assert_eq!(default_verify(&header), Err(Error::NotValidator)); - - // when header signature is invalid - let mut header = good_header.clone(); - header.seal[1] = rlp_encode(&H520::default()).to_vec(); - assert_eq!(default_verify(&header), Err(Error::NotValidator)); - - // when everything is OK - assert_eq!(default_verify(&good_header).map(|_| ()), Ok(())); - } - - #[test] - fn pool_verifies_known_blocks() { - // when header is known - assert_eq!( - default_accept_into_pool(|validators| ( - HeaderBuilder::with_parent_number(2).sign_by_set(validators), - None - )), - Err(Error::KnownHeader), - ); - } - - #[test] - fn pool_verifies_ancient_blocks() { - // when header number is less than finalized - assert_eq!( - default_accept_into_pool(|validators| ( - HeaderBuilder::with_parent_number(1) - .gas_limit((GAS_LIMIT + 1).into()) - .sign_by_set(validators), - None, - ),), - Err(Error::AncientHeader), - ); - } - - #[test] - fn pool_rejects_headers_without_required_receipts() { - assert_eq!( - default_accept_into_pool(|_| ( - AuraHeader { - number: 20_000_000, - seal: vec![vec![], vec![]], - gas_limit: test_aura_config().min_gas_limit, - log_bloom: (&[0xff; 256]).into(), - ..Default::default() - }, - None, - ),), - 
Err(Error::MissingTransactionsReceipts), - ); - } - - #[test] - fn pool_rejects_headers_with_redundant_receipts() { - assert_eq!( - default_accept_into_pool(|validators| ( - HeaderBuilder::with_parent_number(3).sign_by_set(validators), - Some(vec![Receipt { - gas_used: 1.into(), - log_bloom: (&[0xff; 256]).into(), - logs: vec![], - outcome: TransactionOutcome::Unknown, - }]), - ),), - Err(Error::RedundantTransactionsReceipts), - ); - } - - #[test] - fn pool_verifies_future_block_number() { - // when header is too far from the future - assert_eq!( - default_accept_into_pool(|validators| ( - HeaderBuilder::with_number(100).sign_by_set(validators), - None - ),), - Err(Error::UnsignedTooFarInTheFuture), - ); - } - - #[test] - fn pool_performs_full_verification_when_parent_is_known() { - // if parent is known, then we'll execute contextual_checks, which - // checks for DoubleVote - assert_eq!( - default_accept_into_pool(|validators| ( - HeaderBuilder::with_parent_number(3) - .step(GENESIS_STEP + 3) - .sign_by_set(validators), - None, - ),), - Err(Error::DoubleVote), - ); - } - - #[test] - fn pool_performs_validators_checks_when_parent_is_unknown() { - // if parent is unknown, then we still need to check if header has required signature - // (even if header will be considered invalid/duplicate later, we can use this signature - // as a proof of malicious action by this validator) - assert_eq!( - default_accept_into_pool(|_| ( - HeaderBuilder::with_number(8).step(8).sign_by(&validator(1)), - None, - )), - Err(Error::NotValidator), - ); - } - - #[test] - fn pool_verifies_header_with_known_parent() { - let mut hash = None; - assert_eq!( - default_accept_into_pool(|validators| { - let header = HeaderBuilder::with_parent_number(3).sign_by_set(validators); - hash = Some(header.compute_hash()); - (header, None) - }), - Ok(( - // no tags are required - vec![], - // header provides two tags - vec![(4u64, validators_addresses(3)[1]).encode(), (4u64, hash.unwrap()).encode(),], - )), - ); - } - - #[test] - fn pool_verifies_header_with_unknown_parent() { - let mut id = None; - let mut parent_id = None; - assert_eq!( - default_accept_into_pool(|validators| { - let header = - HeaderBuilder::with_number(5).step(GENESIS_STEP + 5).sign_by_set(validators); - id = Some(header.compute_id()); - parent_id = header.parent_id(); - (header, None) - }), - Ok(( - // parent tag required - vec![parent_id.unwrap().encode()], - // header provides two tags - vec![(5u64, validator_address(2)).encode(), id.unwrap().encode(),], - )), - ); - } - - #[test] - fn pool_uses_next_validators_set_when_finalized_fails() { - assert_eq!( - default_accept_into_pool(|actual_validators| { - // change finalized set at parent header - change_validators_set_at(3, validators_addresses(1), None); - - // header is signed using wrong set - let header = HeaderBuilder::with_number(5) - .step(GENESIS_STEP + 2) - .sign_by_set(actual_validators); - - (header, None) - }), - Err(Error::NotValidator), - ); - - let mut id = None; - let mut parent_id = None; - assert_eq!( - default_accept_into_pool(|actual_validators| { - // change finalized set at parent header + signal valid set at parent block - change_validators_set_at( - 3, - validators_addresses(10), - Some(validators_addresses(3)), - ); - - // header is signed using wrong set - let header = HeaderBuilder::with_number(5) - .step(GENESIS_STEP + 2) - .sign_by_set(actual_validators); - id = Some(header.compute_id()); - parent_id = header.parent_id(); - - (header, None) - }), - Ok(( - // parent tag required - 
vec![parent_id.unwrap().encode(),], - // header provides two tags - vec![(5u64, validator_address(2)).encode(), id.unwrap().encode(),], - )), - ); - } - - #[test] - fn pool_rejects_headers_with_invalid_receipts() { - assert_eq!( - default_accept_into_pool(|validators| { - let header = HeaderBuilder::with_parent_number(3) - .log_bloom((&[0xff; 256]).into()) - .sign_by_set(validators); - (header, Some(vec![validators_change_receipt(Default::default())])) - }), - Err(Error::TransactionsReceiptsMismatch), - ); - } - - #[test] - fn pool_accepts_headers_with_valid_receipts() { - let mut hash = None; - let receipts = vec![validators_change_receipt(Default::default())]; - let receipts_root = compute_merkle_root(receipts.iter().map(|r| r.rlp())); - - assert_eq!( - default_accept_into_pool(|validators| { - let header = HeaderBuilder::with_parent_number(3) - .log_bloom((&[0xff; 256]).into()) - .receipts_root(receipts_root) - .sign_by_set(validators); - hash = Some(header.compute_hash()); - (header, Some(receipts.clone())) - }), - Ok(( - // no tags are required - vec![], - // header provides two tags - vec![(4u64, validators_addresses(3)[1]).encode(), (4u64, hash.unwrap()).encode(),], - )), - ); - } -} diff --git a/bridges/primitives/currency-exchange/Cargo.toml b/bridges/primitives/currency-exchange/Cargo.toml deleted file mode 100644 index 165891f0c6b1..000000000000 --- a/bridges/primitives/currency-exchange/Cargo.toml +++ /dev/null @@ -1,27 +0,0 @@ -[package] -name = "bp-currency-exchange" -description = "Primitives of currency exchange module." -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false } -scale-info = { version = "1.0", default-features = false, features = ["derive"] } - -# Substrate Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } - -[features] -default = ["std"] -std = [ - "codec/std", - "frame-support/std", - "scale-info/std", - "sp-api/std", - "sp-std/std", -] diff --git a/bridges/primitives/currency-exchange/src/lib.rs b/bridges/primitives/currency-exchange/src/lib.rs deleted file mode 100644 index 1a30915b1cbf..000000000000 --- a/bridges/primitives/currency-exchange/src/lib.rs +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -#![cfg_attr(not(feature = "std"), no_std)] -// RuntimeApi generated functions -#![allow(clippy::too_many_arguments)] -// Generated by `DecodeLimit::decode_with_depth_limit` -#![allow(clippy::unnecessary_mut_passed)] - -use codec::{Decode, Encode, EncodeLike}; -use frame_support::{Parameter, RuntimeDebug}; -use scale_info::TypeInfo; -use sp_api::decl_runtime_apis; -use sp_std::marker::PhantomData; - -/// All errors that may happen during exchange. -#[derive(RuntimeDebug, PartialEq)] -pub enum Error { - /// Invalid peer blockchain transaction provided. - InvalidTransaction, - /// Peer transaction has invalid amount. - InvalidAmount, - /// Peer transaction has invalid recipient. - InvalidRecipient, - /// Cannot map from peer recipient to this blockchain recipient. - FailedToMapRecipients, - /// Failed to convert from peer blockchain currency to this blockchain currency. - FailedToConvertCurrency, - /// Deposit has failed. - DepositFailed, - /// Deposit has partially failed (changes to recipient account were made). - DepositPartiallyFailed, -} - -/// Result of all exchange operations. -pub type Result = sp_std::result::Result; - -/// Peer blockchain lock funds transaction. -#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq, TypeInfo)] -pub struct LockFundsTransaction { - /// Something that uniquely identifies this transfer. - pub id: TransferId, - /// Funds recipient on the peer chain. - pub recipient: Recipient, - /// Amount of the locked funds. - pub amount: Amount, -} - -/// Peer blockchain transaction that may represent lock funds transaction. -pub trait MaybeLockFundsTransaction { - /// Transaction type. - type Transaction; - /// Identifier that uniquely identifies this transfer. - type Id: Decode + Encode + TypeInfo + EncodeLike + sp_std::fmt::Debug; - /// Peer recipient type. - type Recipient; - /// Peer currency amount type. - type Amount; - - /// Parse lock funds transaction of the peer blockchain. Returns None if - /// transaction format is unknown, or it isn't a lock funds transaction. - fn parse( - tx: &Self::Transaction, - ) -> Result>; -} - -/// Map that maps recipients from peer blockchain to this blockchain recipients. -pub trait RecipientsMap { - /// Peer blockchain recipient type. - type PeerRecipient; - /// Current blockchain recipient type. - type Recipient; - - /// Lookup current blockchain recipient by peer blockchain recipient. - fn map(peer_recipient: Self::PeerRecipient) -> Result; -} - -/// Conversion between two currencies. -pub trait CurrencyConverter { - /// Type of the source currency amount. - type SourceAmount; - /// Type of the target currency amount. - type TargetAmount; - - /// Covert from source to target currency. - fn convert(amount: Self::SourceAmount) -> Result; -} - -/// Currency deposit. -pub trait DepositInto { - /// Recipient type. - type Recipient; - /// Currency amount type. - type Amount; - - /// Grant some money to given account. - fn deposit_into(recipient: Self::Recipient, amount: Self::Amount) -> Result<()>; -} - -/// Recipients map which is used when accounts ids are the same on both chains. -#[derive(Debug)] -pub struct IdentityRecipients(PhantomData); - -impl RecipientsMap for IdentityRecipients { - type PeerRecipient = AccountId; - type Recipient = AccountId; - - fn map(peer_recipient: Self::PeerRecipient) -> Result { - Ok(peer_recipient) - } -} - -/// Currency converter which is used when currency is the same on both chains. 
-#[derive(Debug)] -pub struct IdentityCurrencyConverter(PhantomData); - -impl CurrencyConverter for IdentityCurrencyConverter { - type SourceAmount = Amount; - type TargetAmount = Amount; - - fn convert(currency: Self::SourceAmount) -> Result { - Ok(currency) - } -} - -decl_runtime_apis! { - /// API for Rialto exchange transactions submitters. - pub trait RialtoCurrencyExchangeApi { - /// Returns true if currency exchange module is able to import transaction proof in - /// its current state. - fn filter_transaction_proof(proof: Proof) -> bool; - } - - /// API for Kovan exchange transactions submitters. - pub trait KovanCurrencyExchangeApi { - /// Returns true if currency exchange module is able to import transaction proof in - /// its current state. - fn filter_transaction_proof(proof: Proof) -> bool; - } -} diff --git a/bridges/primitives/ethereum-poa/Cargo.toml b/bridges/primitives/ethereum-poa/Cargo.toml deleted file mode 100644 index 71f071bbf0e6..000000000000 --- a/bridges/primitives/ethereum-poa/Cargo.toml +++ /dev/null @@ -1,59 +0,0 @@ -[package] -name = "bp-eth-poa" -description = "Primitives of Ethereum PoA Bridge module." -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false } -ethbloom = { version = "0.10.0", default-features = false, features = ["rlp"] } -fixed-hash = { version = "0.7", default-features = false } -hash-db = { version = "0.15.2", default-features = false } -impl-rlp = { version = "0.3", default-features = false } -impl-serde = { version = "0.3.1", optional = true } -libsecp256k1 = { version = "0.7", default-features = false, features = ["hmac", "static-context"] } -parity-bytes = { version = "0.1", default-features = false } -plain_hasher = { version = "0.2.2", default-features = false } -primitive-types = { version = "0.10", default-features = false, features = ["codec", "rlp"] } -rlp = { version = "0.5", default-features = false } -scale-info = { version = "1.0", default-features = false, features = ["derive"] } -serde = { version = "1.0", optional = true } -serde-big-array = { version = "0.2", optional = true } -triehash = { version = "0.8.2", default-features = false } - -# Substrate Dependencies - -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } - -[dev-dependencies] -hex-literal = "0.2" - -[features] -default = ["std"] -std = [ - "codec/std", - "ethbloom/std", - "fixed-hash/std", - "hash-db/std", - "impl-rlp/std", - "impl-serde", - "libsecp256k1/std", - "parity-bytes/std", - "plain_hasher/std", - "primitive-types/std", - "primitive-types/serde", - "rlp/std", - "scale-info/std", - "serde/std", - "serde-big-array", - "sp-api/std", - "sp-io/std", - "sp-runtime/std", - "sp-std/std", - "triehash/std", -] diff --git a/bridges/primitives/ethereum-poa/src/lib.rs b/bridges/primitives/ethereum-poa/src/lib.rs deleted file mode 100644 index 58f5731e5222..000000000000 --- a/bridges/primitives/ethereum-poa/src/lib.rs +++ /dev/null @@ -1,732 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) 
Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -#![cfg_attr(not(feature = "std"), no_std)] -// RuntimeApi generated functions -#![allow(clippy::too_many_arguments)] -// Generated by `DecodeLimit::decode_with_depth_limit` -#![allow(clippy::unnecessary_mut_passed)] - -pub use parity_bytes::Bytes; -pub use primitive_types::{H160, H256, H512, U128, U256}; -pub use rlp::encode as rlp_encode; - -use codec::{Decode, Encode}; -use ethbloom::{Bloom as EthBloom, Input as BloomInput}; -use fixed_hash::construct_fixed_hash; -use rlp::{Decodable, DecoderError, Rlp, RlpStream}; -use scale_info::TypeInfo; -use sp_io::hashing::keccak_256; -use sp_runtime::RuntimeDebug; -use sp_std::prelude::*; - -use impl_rlp::impl_fixed_hash_rlp; -#[cfg(feature = "std")] -use impl_serde::impl_fixed_hash_serde; -#[cfg(feature = "std")] -use serde::{Deserialize, Serialize}; -#[cfg(feature = "std")] -use serde_big_array::big_array; - -construct_fixed_hash! { pub struct H520(65); } -impl_fixed_hash_rlp!(H520, 65); -#[cfg(feature = "std")] -impl_fixed_hash_serde!(H520, 65); - -/// Raw (RLP-encoded) ethereum transaction. -pub type RawTransaction = Vec; - -/// Raw (RLP-encoded) ethereum transaction receipt. -pub type RawTransactionReceipt = Vec; - -/// An ethereum address. -pub type Address = H160; - -pub mod signatures; - -/// Complete header id. -#[derive(Encode, Decode, Default, RuntimeDebug, PartialEq, Clone, Copy, TypeInfo)] -pub struct HeaderId { - /// Header number. - pub number: u64, - /// Header hash. - pub hash: H256, -} - -/// An Aura header. -#[derive(Clone, Default, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -pub struct AuraHeader { - /// Parent block hash. - pub parent_hash: H256, - /// Block timestamp. - pub timestamp: u64, - /// Block number. - pub number: u64, - /// Block author. - pub author: Address, - - /// Transactions root. - pub transactions_root: H256, - /// Block uncles hash. - pub uncles_hash: H256, - /// Block extra data. - pub extra_data: Bytes, - - /// State root. - pub state_root: H256, - /// Block receipts root. - pub receipts_root: H256, - /// Block bloom. - pub log_bloom: Bloom, - /// Gas used for contracts execution. - pub gas_used: U256, - /// Block gas limit. - pub gas_limit: U256, - - /// Block difficulty. - pub difficulty: U256, - /// Vector of post-RLP-encoded fields. - pub seal: Vec, -} - -/// Parsed ethereum transaction. -#[derive(PartialEq, RuntimeDebug)] -pub struct Transaction { - /// Sender address. - pub sender: Address, - /// Unsigned portion of ethereum transaction. - pub unsigned: UnsignedTransaction, -} - -/// Unsigned portion of ethereum transaction. -#[derive(Clone, PartialEq, RuntimeDebug)] -pub struct UnsignedTransaction { - /// Sender nonce. - pub nonce: U256, - /// Gas price. - pub gas_price: U256, - /// Gas limit. 
-	pub gas: U256,
-	/// Transaction destination address. None if it is contract creation transaction.
-	pub to: Option<Address>
, - /// Value. - pub value: U256, - /// Associated data. - pub payload: Bytes, -} - -/// Information describing execution of a transaction. -#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] -pub struct Receipt { - /// The total gas used in the block following execution of the transaction. - pub gas_used: U256, - /// The OR-wide combination of all logs' blooms for this transaction. - pub log_bloom: Bloom, - /// The logs stemming from this transaction. - pub logs: Vec, - /// Transaction outcome. - pub outcome: TransactionOutcome, -} - -/// Transaction outcome store in the receipt. -#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] -pub enum TransactionOutcome { - /// Status and state root are unknown under EIP-98 rules. - Unknown, - /// State root is known. Pre EIP-98 and EIP-658 rules. - StateRoot(H256), - /// Status code is known. EIP-658 rules. - StatusCode(u8), -} - -/// A record of execution for a `LOG` operation. -#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] -pub struct LogEntry { - /// The address of the contract executing at the point of the `LOG` operation. - pub address: Address, - /// The topics associated with the `LOG` operation. - pub topics: Vec, - /// The data associated with the `LOG` operation. - pub data: Bytes, -} - -/// Logs bloom. -#[derive(Clone, Encode, Decode, TypeInfo)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -pub struct Bloom(#[cfg_attr(feature = "std", serde(with = "BigArray"))] [u8; 256]); - -#[cfg(feature = "std")] -big_array! { BigArray; } - -/// An empty step message that is included in a seal, the only difference is that it doesn't include -/// the `parent_hash` in order to save space. The included signature is of the original empty step -/// message, which can be reconstructed by using the parent hash of the block in which this sealed -/// empty message is included. -pub struct SealedEmptyStep { - /// Signature of the original message author. - pub signature: H520, - /// The step this message is generated for. - pub step: u64, -} - -impl AuraHeader { - /// Compute id of this header. - pub fn compute_id(&self) -> HeaderId { - HeaderId { number: self.number, hash: self.compute_hash() } - } - - /// Compute hash of this header (keccak of the RLP with seal). - pub fn compute_hash(&self) -> H256 { - keccak_256(&self.rlp(true)).into() - } - - /// Get id of this header' parent. Returns None if this is genesis header. - pub fn parent_id(&self) -> Option { - self.number - .checked_sub(1) - .map(|parent_number| HeaderId { number: parent_number, hash: self.parent_hash }) - } - - /// Check if passed transactions receipts are matching receipts root in this header. - /// Returns Ok(computed-root) if check succeeds. - /// Returns Err(computed-root) if check fails. - pub fn check_receipts_root(&self, receipts: &[Receipt]) -> Result { - check_merkle_proof(self.receipts_root, receipts.iter().map(|r| r.rlp())) - } - - /// Check if passed raw transactions receipts are matching receipts root in this header. - /// Returns Ok(computed-root) if check succeeds. - /// Returns Err(computed-root) if check fails. - pub fn check_raw_receipts_root<'a>( - &self, - receipts: impl IntoIterator, - ) -> Result { - check_merkle_proof(self.receipts_root, receipts.into_iter()) - } - - /// Check if passed transactions are matching transactions root in this header. - /// Returns Ok(computed-root) if check succeeds. - /// Returns Err(computed-root) if check fails. 
- pub fn check_transactions_root<'a>( - &self, - transactions: impl IntoIterator, - ) -> Result { - check_merkle_proof(self.transactions_root, transactions.into_iter()) - } - - /// Gets the seal hash of this header. - pub fn seal_hash(&self, include_empty_steps: bool) -> Option { - Some(match include_empty_steps { - true => { - let mut message = self.compute_hash().as_bytes().to_vec(); - message.extend_from_slice(self.seal.get(2)?); - keccak_256(&message).into() - }, - false => keccak_256(&self.rlp(false)).into(), - }) - } - - /// Get step this header is generated for. - pub fn step(&self) -> Option { - self.seal.get(0).map(|x| Rlp::new(x)).and_then(|x| x.as_val().ok()) - } - - /// Get header author' signature. - pub fn signature(&self) -> Option { - self.seal.get(1).and_then(|x| Rlp::new(x).as_val().ok()) - } - - /// Extracts the empty steps from the header seal. - pub fn empty_steps(&self) -> Option> { - self.seal.get(2).and_then(|x| Rlp::new(x).as_list::().ok()) - } - - /// Returns header RLP with or without seals. - fn rlp(&self, with_seal: bool) -> Bytes { - let mut s = RlpStream::new(); - if with_seal { - s.begin_list(13 + self.seal.len()); - } else { - s.begin_list(13); - } - - s.append(&self.parent_hash); - s.append(&self.uncles_hash); - s.append(&self.author); - s.append(&self.state_root); - s.append(&self.transactions_root); - s.append(&self.receipts_root); - s.append(&EthBloom::from(self.log_bloom.0)); - s.append(&self.difficulty); - s.append(&self.number); - s.append(&self.gas_limit); - s.append(&self.gas_used); - s.append(&self.timestamp); - s.append(&self.extra_data); - - if with_seal { - for b in &self.seal { - s.append_raw(b, 1); - } - } - - s.out().to_vec() - } -} - -impl UnsignedTransaction { - /// Decode unsigned portion of raw transaction RLP. - pub fn decode_rlp(raw_tx: &[u8]) -> Result { - let tx_rlp = Rlp::new(raw_tx); - let to = tx_rlp.at(3)?; - Ok(UnsignedTransaction { - nonce: tx_rlp.val_at(0)?, - gas_price: tx_rlp.val_at(1)?, - gas: tx_rlp.val_at(2)?, - to: match to.is_empty() { - false => Some(to.as_val()?), - true => None, - }, - value: tx_rlp.val_at(4)?, - payload: tx_rlp.val_at(5)?, - }) - } - - /// Returns message that has to be signed to sign this transaction. - pub fn message(&self, chain_id: Option) -> H256 { - keccak_256(&self.rlp(chain_id)).into() - } - - /// Returns unsigned transaction RLP. - pub fn rlp(&self, chain_id: Option) -> Bytes { - let mut stream = RlpStream::new_list(if chain_id.is_some() { 9 } else { 6 }); - self.rlp_to(chain_id, &mut stream); - stream.out().to_vec() - } - - /// Encode to given RLP stream. - pub fn rlp_to(&self, chain_id: Option, stream: &mut RlpStream) { - stream.append(&self.nonce); - stream.append(&self.gas_price); - stream.append(&self.gas); - match self.to { - Some(to) => stream.append(&to), - None => stream.append(&""), - }; - stream.append(&self.value); - stream.append(&self.payload); - if let Some(chain_id) = chain_id { - stream.append(&chain_id); - stream.append(&0u8); - stream.append(&0u8); - } - } -} - -impl Receipt { - /// Decode status from raw transaction receipt RLP. - pub fn is_successful_raw_receipt(raw_receipt: &[u8]) -> Result { - let rlp = Rlp::new(raw_receipt); - if rlp.item_count()? == 3 { - // no outcome - invalid tx? 
- Ok(false) - } else { - let first = rlp.at(0)?; - if first.is_data() && first.data()?.len() <= 1 { - // EIP-658 transaction - status of successful transaction is 1 - let status: u8 = first.as_val()?; - Ok(status == 1) - } else { - // pre-EIP-658 transaction - we do not support this kind of transactions - Ok(false) - } - } - } - - /// Returns receipt RLP. - pub fn rlp(&self) -> Bytes { - let mut s = RlpStream::new(); - match self.outcome { - TransactionOutcome::Unknown => { - s.begin_list(3); - }, - TransactionOutcome::StateRoot(ref root) => { - s.begin_list(4); - s.append(root); - }, - TransactionOutcome::StatusCode(ref status_code) => { - s.begin_list(4); - s.append(status_code); - }, - } - s.append(&self.gas_used); - s.append(&EthBloom::from(self.log_bloom.0)); - - s.begin_list(self.logs.len()); - for log in &self.logs { - s.begin_list(3); - s.append(&log.address); - s.begin_list(log.topics.len()); - for topic in &log.topics { - s.append(topic); - } - s.append(&log.data); - } - - s.out().to_vec() - } -} - -impl SealedEmptyStep { - /// Returns message that has to be signed by the validator. - pub fn message(&self, parent_hash: &H256) -> H256 { - let mut message = RlpStream::new_list(2); - message.append(&self.step); - message.append(parent_hash); - keccak_256(&message.out()).into() - } - - /// Returns RLP for the vector of empty steps (we only do encoding in tests). - pub fn rlp_of(empty_steps: &[SealedEmptyStep]) -> Bytes { - let mut s = RlpStream::new(); - s.begin_list(empty_steps.len()); - for empty_step in empty_steps { - s.begin_list(2).append(&empty_step.signature).append(&empty_step.step); - } - s.out().to_vec() - } -} - -impl Decodable for SealedEmptyStep { - fn decode(rlp: &Rlp) -> Result { - let signature: H520 = rlp.val_at(0)?; - let step = rlp.val_at(1)?; - - Ok(SealedEmptyStep { signature, step }) - } -} - -impl LogEntry { - /// Calculates the bloom of this log entry. - pub fn bloom(&self) -> Bloom { - let eth_bloom = self.topics.iter().fold( - EthBloom::from(BloomInput::Raw(self.address.as_bytes())), - |mut b, t| { - b.accrue(BloomInput::Raw(t.as_bytes())); - b - }, - ); - Bloom(*eth_bloom.data()) - } -} - -impl Bloom { - /// Returns true if this bloom has all bits from the other set. - pub fn contains(&self, other: &Bloom) -> bool { - self.0.iter().zip(other.0.iter()).all(|(l, r)| (l & r) == *r) - } -} - -impl<'a> From<&'a [u8; 256]> for Bloom { - fn from(buffer: &'a [u8; 256]) -> Bloom { - Bloom(*buffer) - } -} - -impl PartialEq for Bloom { - fn eq(&self, other: &Bloom) -> bool { - self.0.iter().zip(other.0.iter()).all(|(l, r)| l == r) - } -} - -// there's no default for [_; 256], but clippy still complains -#[allow(clippy::derivable_impls)] -impl Default for Bloom { - fn default() -> Self { - Bloom([0; 256]) - } -} - -#[cfg(feature = "std")] -impl std::fmt::Debug for Bloom { - fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - fmt.debug_struct("Bloom").finish() - } -} - -/// Decode Ethereum transaction. 
-pub fn transaction_decode_rlp(raw_tx: &[u8]) -> Result { - // parse transaction fields - let unsigned = UnsignedTransaction::decode_rlp(raw_tx)?; - let tx_rlp = Rlp::new(raw_tx); - let v: u64 = tx_rlp.val_at(6)?; - let r: U256 = tx_rlp.val_at(7)?; - let s: U256 = tx_rlp.val_at(8)?; - - // reconstruct signature - let mut signature = [0u8; 65]; - let (chain_id, v) = match v { - v if v == 27u64 => (None, 0), - v if v == 28u64 => (None, 1), - v if v >= 35u64 => (Some((v - 35) / 2), ((v - 1) % 2) as u8), - _ => (None, 4), - }; - r.to_big_endian(&mut signature[0..32]); - s.to_big_endian(&mut signature[32..64]); - signature[64] = v; - - // reconstruct message that has been signed - let message = unsigned.message(chain_id); - - // recover tx sender - let sender_public = - sp_io::crypto::secp256k1_ecdsa_recover(&signature, message.as_fixed_bytes()) - .map_err(|_| rlp::DecoderError::Custom("Failed to recover transaction sender"))?; - let sender_address = public_to_address(&sender_public); - - Ok(Transaction { sender: sender_address, unsigned }) -} - -/// Convert public key into corresponding ethereum address. -pub fn public_to_address(public: &[u8; 64]) -> Address { - let hash = keccak_256(public); - let mut result = Address::zero(); - result.as_bytes_mut().copy_from_slice(&hash[12..]); - result -} - -/// Check ethereum merkle proof. -/// Returns Ok(computed-root) if check succeeds. -/// Returns Err(computed-root) if check fails. -fn check_merkle_proof>( - expected_root: H256, - items: impl Iterator, -) -> Result { - let computed_root = compute_merkle_root(items); - if computed_root == expected_root { - Ok(computed_root) - } else { - Err(computed_root) - } -} - -/// Compute ethereum merkle root. -pub fn compute_merkle_root>(items: impl Iterator) -> H256 { - struct Keccak256Hasher; - - impl hash_db::Hasher for Keccak256Hasher { - type Out = H256; - type StdHasher = plain_hasher::PlainHasher; - const LENGTH: usize = 32; - fn hash(x: &[u8]) -> Self::Out { - keccak_256(x).into() - } - } - - triehash::ordered_trie_root::(items) -} - -/// Get validator that should author the block at given step. -pub fn step_validator(header_validators: &[T], header_step: u64) -> &T { - &header_validators[(header_step % header_validators.len() as u64) as usize] -} - -sp_api::decl_runtime_apis! { - /// API for querying information about headers from the Rialto Bridge Pallet - pub trait RialtoPoAHeaderApi { - /// Returns number and hash of the best block known to the bridge module. - /// - /// The caller should only submit an `import_header` transaction that makes - /// (or leads to making) other header the best one. - fn best_block() -> (u64, H256); - /// Returns number and hash of the best finalized block known to the bridge module. - fn finalized_block() -> (u64, H256); - /// Returns true if the import of given block requires transactions receipts. - fn is_import_requires_receipts(header: AuraHeader) -> bool; - /// Returns true if header is known to the runtime. - fn is_known_block(hash: H256) -> bool; - } - - /// API for querying information about headers from the Kovan Bridge Pallet - pub trait KovanHeaderApi { - /// Returns number and hash of the best block known to the bridge module. - /// - /// The caller should only submit an `import_header` transaction that makes - /// (or leads to making) other header the best one. - fn best_block() -> (u64, H256); - /// Returns number and hash of the best finalized block known to the bridge module. 
- fn finalized_block() -> (u64, H256); - /// Returns true if the import of given block requires transactions receipts. - fn is_import_requires_receipts(header: AuraHeader) -> bool; - /// Returns true if header is known to the runtime. - fn is_known_block(hash: H256) -> bool; - } -} - -#[cfg(test)] -mod tests { - use super::*; - use hex_literal::hex; - - #[test] - fn transfer_transaction_decode_works() { - // value transfer transaction - // https://etherscan.io/tx/0xb9d4ad5408f53eac8627f9ccd840ba8fb3469d55cd9cc2a11c6e049f1eef4edd - // https://etherscan.io/getRawTx?tx=0xb9d4ad5408f53eac8627f9ccd840ba8fb3469d55cd9cc2a11c6e049f1eef4edd - let raw_tx = hex!("f86c0a85046c7cfe0083016dea94d1310c1e038bc12865d3d3997275b3e4737c6302880b503be34d9fe80080269fc7eaaa9c21f59adf8ad43ed66cf5ef9ee1c317bd4d32cd65401e7aaca47cfaa0387d79c65b90be6260d09dcfb780f29dd8133b9b1ceb20b83b7e442b4bfc30cb"); - assert_eq!( - transaction_decode_rlp(&raw_tx), - Ok(Transaction { - sender: hex!("67835910d32600471f388a137bbff3eb07993c04").into(), - unsigned: UnsignedTransaction { - nonce: 10.into(), - gas_price: 19000000000u64.into(), - gas: 93674.into(), - to: Some(hex!("d1310c1e038bc12865d3d3997275b3e4737c6302").into()), - value: 815217380000000000_u64.into(), - payload: Default::default(), - } - }), - ); - - // Kovan value transfer transaction - // https://kovan.etherscan.io/tx/0x3b4b7bd41c1178045ccb4753aa84c1ef9864b4d712fa308b228917cd837915da - // https://kovan.etherscan.io/getRawTx?tx=0x3b4b7bd41c1178045ccb4753aa84c1ef9864b4d712fa308b228917cd837915da - let raw_tx = hex!("f86a822816808252089470c1ccde719d6f477084f07e4137ab0e55f8369f8930cf46e92063afd8008078a00e4d1f4d8aa992bda3c105ff3d6e9b9acbfd99facea00985e2131029290adbdca028ea29a46a4b66ec65b454f0706228e3768cb0ecf755f67c50ddd472f11d5994"); - assert_eq!( - transaction_decode_rlp(&raw_tx), - Ok(Transaction { - sender: hex!("faadface3fbd81ce37b0e19c0b65ff4234148132").into(), - unsigned: UnsignedTransaction { - nonce: 10262.into(), - gas_price: 0.into(), - gas: 21000.into(), - to: Some(hex!("70c1ccde719d6f477084f07e4137ab0e55f8369f").into()), - value: 900379597077600000000_u128.into(), - payload: Default::default(), - }, - }), - ); - } - - #[test] - fn payload_transaction_decode_works() { - // contract call transaction - // https://etherscan.io/tx/0xdc2b996b4d1d6922bf6dba063bfd70913279cb6170967c9bb80252aeb061cf65 - // https://etherscan.io/getRawTx?tx=0xdc2b996b4d1d6922bf6dba063bfd70913279cb6170967c9bb80252aeb061cf65 - let raw_tx = hex!("f8aa76850430e234008301500094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb000000000000000000000000e08f35f66867a454835b25118f1e490e7f9e9a7400000000000000000000000000000000000000000000000000000000004c4b4025a0964e023999621dc3d4d831c43c71f7555beb6d1192dee81a3674b3f57e310f21a00f229edd86f841d1ee4dc48cc16667e2283817b1d39bae16ced10cd206ae4fd4"); - assert_eq!( - transaction_decode_rlp(&raw_tx), - Ok(Transaction { - sender: hex!("2b9a4d37bdeecdf994c4c9ad7f3cf8dc632f7d70").into(), - unsigned: UnsignedTransaction { - nonce: 118.into(), - gas_price: 18000000000u64.into(), - gas: 86016.into(), - to: Some(hex!("dac17f958d2ee523a2206206994597c13d831ec7").into()), - value: 0.into(), - payload: hex!("a9059cbb000000000000000000000000e08f35f66867a454835b25118f1e490e7f9e9a7400000000000000000000000000000000000000000000000000000000004c4b40").to_vec(), - }, - }), - ); - - // Kovan contract call transaction - // https://kovan.etherscan.io/tx/0x2904b4451d23665492239016b78da052d40d55fdebc7304b38e53cf6a37322cf - // 
https://kovan.etherscan.io/getRawTx?tx=0x2904b4451d23665492239016b78da052d40d55fdebc7304b38e53cf6a37322cf - let raw_tx = hex!("f8ac8302200b843b9aca00830271009484dd11eb2a29615303d18149c0dbfa24167f896680b844a9059cbb00000000000000000000000001503dfc5ad81bf630d83697e98601871bb211b600000000000000000000000000000000000000000000000000000000000027101ba0ce126d2cca81f5e245f292ff84a0d915c0a4ac52af5c51219db1e5d36aa8da35a0045298b79dac631907403888f9b04c2ab5509fe0cc31785276d30a40b915fcf9"); - assert_eq!( - transaction_decode_rlp(&raw_tx), - Ok(Transaction { - sender: hex!("617da121abf03d4c1af572f5a4e313e26bef7bdc").into(), - unsigned: UnsignedTransaction { - nonce: 139275.into(), - gas_price: 1000000000.into(), - gas: 160000.into(), - to: Some(hex!("84dd11eb2a29615303d18149c0dbfa24167f8966").into()), - value: 0.into(), - payload: hex!("a9059cbb00000000000000000000000001503dfc5ad81bf630d83697e98601871bb211b60000000000000000000000000000000000000000000000000000000000002710").to_vec(), - }, - }), - ); - } - - #[test] - fn is_successful_raw_receipt_works() { - assert!(Receipt::is_successful_raw_receipt(&[]).is_err()); - - assert_eq!( - Receipt::is_successful_raw_receipt( - &Receipt { - outcome: TransactionOutcome::Unknown, - gas_used: Default::default(), - log_bloom: Default::default(), - logs: Vec::new(), - } - .rlp() - ), - Ok(false), - ); - assert_eq!( - Receipt::is_successful_raw_receipt( - &Receipt { - outcome: TransactionOutcome::StateRoot(Default::default()), - gas_used: Default::default(), - log_bloom: Default::default(), - logs: Vec::new(), - } - .rlp() - ), - Ok(false), - ); - assert_eq!( - Receipt::is_successful_raw_receipt( - &Receipt { - outcome: TransactionOutcome::StatusCode(0), - gas_used: Default::default(), - log_bloom: Default::default(), - logs: Vec::new(), - } - .rlp() - ), - Ok(false), - ); - assert_eq!( - Receipt::is_successful_raw_receipt( - &Receipt { - outcome: TransactionOutcome::StatusCode(1), - gas_used: Default::default(), - log_bloom: Default::default(), - logs: Vec::new(), - } - .rlp() - ), - Ok(true), - ); - } - - #[test] - fn is_successful_raw_receipt_with_empty_data() { - let mut stream = RlpStream::new(); - stream.begin_list(4); - stream.append_empty_data(); - stream.append(&1u64); - stream.append(&2u64); - stream.append(&3u64); - - assert_eq!(Receipt::is_successful_raw_receipt(&stream.out()), Ok(false),); - } -} diff --git a/bridges/primitives/ethereum-poa/src/signatures.rs b/bridges/primitives/ethereum-poa/src/signatures.rs deleted file mode 100644 index 26371f2166ad..000000000000 --- a/bridges/primitives/ethereum-poa/src/signatures.rs +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2020-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . -// - -//! Helpers related to signatures. -//! -//! Used for testing and benchmarking. 
- -// reexport to avoid direct secp256k1 deps by other crates -pub use libsecp256k1::SecretKey; - -use crate::{ - public_to_address, rlp_encode, step_validator, Address, AuraHeader, RawTransaction, - UnsignedTransaction, H256, H520, U256, -}; - -use libsecp256k1::{Message, PublicKey}; - -/// Utilities for signing headers. -pub trait SignHeader { - /// Signs header by given author. - fn sign_by(self, author: &SecretKey) -> AuraHeader; - /// Signs header by given authors set. - fn sign_by_set(self, authors: &[SecretKey]) -> AuraHeader; -} - -/// Utilities for signing transactions. -pub trait SignTransaction { - /// Sign transaction by given author. - fn sign_by(self, author: &SecretKey, chain_id: Option) -> RawTransaction; -} - -impl SignHeader for AuraHeader { - fn sign_by(mut self, author: &SecretKey) -> Self { - self.author = secret_to_address(author); - - let message = self.seal_hash(false).unwrap(); - let signature = sign(author, message); - self.seal[1] = rlp_encode(&signature).to_vec(); - self - } - - fn sign_by_set(self, authors: &[SecretKey]) -> Self { - let step = self.step().unwrap(); - let author = step_validator(authors, step); - self.sign_by(author) - } -} - -impl SignTransaction for UnsignedTransaction { - fn sign_by(self, author: &SecretKey, chain_id: Option) -> RawTransaction { - let message = self.message(chain_id); - let signature = sign(author, message); - let signature_r = U256::from_big_endian(&signature.as_fixed_bytes()[..32][..]); - let signature_s = U256::from_big_endian(&signature.as_fixed_bytes()[32..64][..]); - let signature_v = signature.as_fixed_bytes()[64] as u64; - let signature_v = signature_v + if let Some(n) = chain_id { 35 + n * 2 } else { 27 }; - - let mut stream = rlp::RlpStream::new_list(9); - self.rlp_to(None, &mut stream); - stream.append(&signature_v); - stream.append(&signature_r); - stream.append(&signature_s); - stream.out().to_vec() - } -} - -/// Return author's signature over given message. -pub fn sign(author: &SecretKey, message: H256) -> H520 { - let (signature, recovery_id) = - libsecp256k1::sign(&Message::parse(message.as_fixed_bytes()), author); - let mut raw_signature = [0u8; 65]; - raw_signature[..64].copy_from_slice(&signature.serialize()); - raw_signature[64] = recovery_id.serialize(); - raw_signature.into() -} - -/// Returns address corresponding to given secret key. 
-pub fn secret_to_address(secret: &SecretKey) -> Address { - let public = PublicKey::from_secret_key(secret); - let mut raw_public = [0u8; 64]; - raw_public.copy_from_slice(&public.serialize()[1..]); - public_to_address(&raw_public) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{transaction_decode_rlp, Transaction}; - - #[test] - fn transaction_signed_properly() { - // case1: with chain_id replay protection + to - let signer = SecretKey::parse(&[1u8; 32]).unwrap(); - let signer_address = secret_to_address(&signer); - let unsigned = UnsignedTransaction { - nonce: 100.into(), - gas_price: 200.into(), - gas: 300.into(), - to: Some([42u8; 20].into()), - value: 400.into(), - payload: vec![1, 2, 3], - }; - let raw_tx = unsigned.clone().sign_by(&signer, Some(42)); - assert_eq!( - transaction_decode_rlp(&raw_tx), - Ok(Transaction { sender: signer_address, unsigned }), - ); - - // case2: without chain_id replay protection + contract creation - let unsigned = UnsignedTransaction { - nonce: 100.into(), - gas_price: 200.into(), - gas: 300.into(), - to: None, - value: 400.into(), - payload: vec![1, 2, 3], - }; - let raw_tx = unsigned.clone().sign_by(&signer, None); - assert_eq!( - transaction_decode_rlp(&raw_tx), - Ok(Transaction { sender: signer_address, unsigned }), - ); - } -} diff --git a/bridges/relays/bin-ethereum/Cargo.toml b/bridges/relays/bin-ethereum/Cargo.toml deleted file mode 100644 index 610dee2c3ce9..000000000000 --- a/bridges/relays/bin-ethereum/Cargo.toml +++ /dev/null @@ -1,42 +0,0 @@ -[package] -name = "ethereum-poa-relay" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -anyhow = "1.0" -async-std = "1.9.0" -async-trait = "0.1.42" -clap = { version = "2.33.3", features = ["yaml"] } -codec = { package = "parity-scale-codec", version = "2.2.0" } -ethabi = { git = "https://github.com/svyatonik/ethabi.git", branch = "bump-deps" } -ethabi-contract = { git = "https://github.com/svyatonik/ethabi.git", branch = "bump-deps" } -ethabi-derive = { git = "https://github.com/svyatonik/ethabi.git", branch = "bump-deps" } -futures = "0.3.12" -hex = "0.4" -hex-literal = "0.3" -libsecp256k1 = { version = "0.7", default-features = false, features = ["hmac"] } -log = "0.4.14" -num-traits = "0.2" -serde_json = "1.0.64" -thiserror = "1.0.26" - -# Bridge dependencies - -bp-currency-exchange = { path = "../../primitives/currency-exchange" } -bp-eth-poa = { path = "../../primitives/ethereum-poa" } -exchange-relay = { path = "../exchange" } -headers-relay = { path = "../headers" } -relay-ethereum-client = { path = "../client-ethereum" } -relay-rialto-client = { path = "../client-rialto" } -relay-substrate-client = { path = "../client-substrate" } -relay-utils = { path = "../utils" } -rialto-runtime = { path = "../../bin/rialto/runtime" } - -# Substrate Dependencies - -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/bridges/relays/bin-ethereum/README.md b/bridges/relays/bin-ethereum/README.md deleted file mode 100644 index 9fe2f623fd05..000000000000 --- a/bridges/relays/bin-ethereum/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# PoA <> Substrate Bridge - -**DISCLAIMER:** *we recommend not using the bridge in "production" (to bridge significant amounts) just yet. 
-it's missing a code audit and should still be considered alpha. we can't rule out that there are bugs that might result in loss of the bridged amounts. -we'll update this disclaimer once that changes* - -These docs are very incomplete yet. Describe high-level goals here in the (near) future. diff --git a/bridges/relays/bin-ethereum/res/substrate-bridge-abi.json b/bridges/relays/bin-ethereum/res/substrate-bridge-abi.json deleted file mode 100644 index b7d7b4b9152c..000000000000 --- a/bridges/relays/bin-ethereum/res/substrate-bridge-abi.json +++ /dev/null @@ -1,167 +0,0 @@ -[ - { - "inputs": [ - { - "internalType": "bytes", - "name": "rawInitialHeader", - "type": "bytes" - }, - { - "internalType": "uint64", - "name": "initialValidatorsSetId", - "type": "uint64" - }, - { - "internalType": "bytes", - "name": "initialValidatorsSet", - "type": "bytes" - } - ], - "stateMutability": "nonpayable", - "type": "constructor" - }, - { - "stateMutability": "nonpayable", - "type": "fallback" - }, - { - "inputs": [], - "name": "bestKnownHeader", - "outputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" - }, - { - "internalType": "bytes32", - "name": "", - "type": "bytes32" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint256", - "name": "finalityTargetNumber", - "type": "uint256" - }, - { - "internalType": "bytes32", - "name": "finalityTargetHash", - "type": "bytes32" - }, - { - "internalType": "bytes", - "name": "rawFinalityProof", - "type": "bytes" - } - ], - "name": "importFinalityProof", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "bytes", - "name": "rawHeader1", - "type": "bytes" - }, - { - "internalType": "bytes", - "name": "rawHeader2", - "type": "bytes" - }, - { - "internalType": "bytes", - "name": "rawHeader3", - "type": "bytes" - }, - { - "internalType": "bytes", - "name": "rawHeader4", - "type": "bytes" - } - ], - "name": "importHeaders", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [], - "name": "incompleteHeaders", - "outputs": [ - { - "internalType": "uint256[]", - "name": "", - "type": "uint256[]" - }, - { - "internalType": "bytes32[]", - "name": "", - "type": "bytes32[]" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "bytes", - "name": "rawHeader1", - "type": "bytes" - }, - { - "internalType": "bytes", - "name": "rawHeader2", - "type": "bytes" - }, - { - "internalType": "bytes", - "name": "rawHeader3", - "type": "bytes" - }, - { - "internalType": "bytes", - "name": "rawHeader4", - "type": "bytes" - } - ], - "name": "isIncompleteHeaders", - "outputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "bytes32", - "name": "headerHash", - "type": "bytes32" - } - ], - "name": "isKnownHeader", - "outputs": [ - { - "internalType": "bool", - "name": "", - "type": "bool" - } - ], - "stateMutability": "view", - "type": "function" - } -] diff --git a/bridges/relays/bin-ethereum/res/substrate-bridge-bytecode.hex b/bridges/relays/bin-ethereum/res/substrate-bridge-bytecode.hex deleted file mode 100644 index 6dd6a33046f6..000000000000 --- a/bridges/relays/bin-ethereum/res/substrate-bridge-bytecode.hex +++ /dev/null @@ -1 +0,0 @@ 
-60806040523480156200001157600080fd5b5060405162001af838038062001af8833981810160405260608110156200003757600080fd5b81019080805160405193929190846401000000008211156200005857600080fd5b9083019060208201858111156200006e57600080fd5b82516401000000008111828201881017156200008957600080fd5b82525081516020918201929091019080838360005b83811015620000b85781810151838201526020016200009e565b50505050905090810190601f168015620000e65780820380516001836020036101000a031916815260200191505b506040818152602083015192018051929491939192846401000000008211156200010f57600080fd5b9083019060208201858111156200012557600080fd5b82516401000000008111828201881017156200014057600080fd5b82525081516020918201929091019080838360005b838110156200016f57818101518382015260200162000155565b50505050905090810190601f1680156200019d5780820380516001836020036101000a031916815260200191505b50604052505050620001ae620003d5565b620001c2846001600160e01b03620002dc16565b805160008181556002918255604080840180516001908155825160e08101845281815260208088015181830190815293518286019081526080808a0151606085019081526001600160401b038e169185019190915260a0840188905260c084018890528951885260078352959096208251815460ff191690151517815593519284019290925593519482019490945590518051949550919390926200026f9260038501929101906200040a565b506080820151600482810180546001600160401b03199081166001600160401b039485161790915560a0850151600585015560c09094015160069093019290925560038054909316908616179091558251620002d1919060208501906200040a565b5050505050620004af565b620002e6620003d5565b60008060008060008651602088016040516020810160208101602081016020810160a08588886010600019fa6200031c57600080fd5b84519b5083519a50825199508151985080519750505050505050506060816001600160401b03811180156200035057600080fd5b506040519080825280601f01601f1916602001820160405280156200037c576020820181803683370190505b5090508115620003a85787516020890160208301848184846011600019fa620003a457600080fd5b5050505b6040805160a081018252968752602087019590955293850192909252606084015250608082015292915050565b6040518060a0016040528060008019168152602001600080191681526020016000815260200160008152602001606081525090565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f106200044d57805160ff19168380011785556200047d565b828001600101855582156200047d579182015b828111156200047d57825182559160200191906001019062000460565b506200048b9291506200048f565b5090565b620004ac91905b808211156200048b576000815560010162000496565b90565b61163980620004bf6000396000f3fe608060405234801561001057600080fd5b50600436106100625760003560e01c8063374c2c26146100675780636a742c0914610108578063871ebe181461033d578063d96a2deb1461036e578063e8ffbe841461038f578063fae71ae8146105d4575b600080fd5b61006f610684565b604051808060200180602001838103835285818151815260200191508051906020019060200280838360005b838110156100b357818101518382015260200161009b565b50505050905001838103825284818151815260200191508051906020019060200280838360005b838110156100f25781810151838201526020016100da565b5050505090500194505050505060405180910390f35b61033b6004803603608081101561011e57600080fd5b810190602081018135600160201b81111561013857600080fd5b82018360208201111561014a57600080fd5b803590602001918460018302840111600160201b8311171561016b57600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295949360208101935035915050600160201b8111156101bd57600080fd5b8201836020820111156101cf57600080fd5b803590602001918460018302840111600160201b831117156101f057600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295949360208101935035915050
600160201b81111561024257600080fd5b82018360208201111561025457600080fd5b803590602001918460018302840111600160201b8311171561027557600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295949360208101935035915050600160201b8111156102c757600080fd5b8201836020820111156102d957600080fd5b803590602001918460018302840111600160201b831117156102fa57600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250929550610789945050505050565b005b61035a6004803603602081101561035357600080fd5b50356107e5565b604080519115158252519081900360200190f35b6103766107fd565b6040805192835260208301919091528051918290030190f35b6105c2600480360360808110156103a557600080fd5b810190602081018135600160201b8111156103bf57600080fd5b8201836020820111156103d157600080fd5b803590602001918460018302840111600160201b831117156103f257600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295949360208101935035915050600160201b81111561044457600080fd5b82018360208201111561045657600080fd5b803590602001918460018302840111600160201b8311171561047757600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295949360208101935035915050600160201b8111156104c957600080fd5b8201836020820111156104db57600080fd5b803590602001918460018302840111600160201b831117156104fc57600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295949360208101935035915050600160201b81111561054e57600080fd5b82018360208201111561056057600080fd5b803590602001918460018302840111600160201b8311171561058157600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250929550610815945050505050565b60408051918252519081900360200190f35b61033b600480360360608110156105ea57600080fd5b813591602081013591810190606081016040820135600160201b81111561061057600080fd5b82018360208201111561062257600080fd5b803590602001918460018302840111600160201b8311171561064357600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250929550610b28945050505050565b6005546060908190818167ffffffffffffffff811180156106a457600080fd5b506040519080825280602002602001820160405280156106ce578160200160208202803683370190505b50905060005b828110156107295760076000600583815481106106ed57fe5b906000526020600020015481526020019081526020016000206002015482828151811061071657fe5b60209081029190910101526001016106d4565b508060058080548060200260200160405190810160405280929190818152602001828054801561077857602002820191906000526020600020905b815481526020019060010190808311610764575b505050505090509350935050509091565b61079284610d8d565b61079b576107df565b8251156107b4576107ab83610d8d565b6107b4576107df565b8151156107cd576107c482610d8d565b6107cd576107df565b8051156107df576107dd81610d8d565b505b50505050565b60008181526007602052604090205460ff165b919050565b60008054808252600760205260409091206002015491565b600061081f611454565b61082886610f0e565b9050610832611489565b602082810151600090815260078252604090819020815160e081018352815460ff1615158152600180830154828601526002808401548386015260038401805486516101009482161594909402600019011691909104601f81018790048702830187019095528482529194929360608601939192918301828280156108f85780601f106108cd576101008083540402835291602001916108f8565b820191906000526020600020905b8154815290600101906020018083116108db57829003601f168201915b5050509183525050600482015467ffffffffffffffff16602082015260058201546040820152600690910
15460609091015290506000806109398484611001565b945050505091506000600681111561094d57fe5b82600681111561095957fe5b146109ab576040805162461bcd60e51b815260206004820152601860248201527f43616e277420696d706f727420616e7920686561646572730000000000000000604482015290519081900360640190fd5b83604001518114156109c4576001945050505050610b20565b87516109d7576000945050505050610b20565b6109df611489565b6109e98585611171565b90506109f3611454565b6109fc8a610f0e565b90506000610a0a8284611001565b9450505050508160400151811415610a2c576002975050505050505050610b20565b8951610a42576000975050505050505050610b20565b610a4a611489565b610a548388611171565b9050610a5e611454565b610a678c610f0e565b90506000610a758284611001565b9450505050508160400151811415610a9a5760039a5050505050505050505050610b20565b8b51610ab35760009a5050505050505050505050610b20565b610abb611489565b610ac5838b611171565b9050610acf611454565b610ad88e610f0e565b90506000610ae68284611001565b9450505050508160400151811415610b0e5760049d5050505050505050505050505050610b20565b60009d50505050505050505050505050505b949350505050565b6000828152600760205260409020600201548314610b775760405162461bcd60e51b815260040180806020018281038252602f8152602001806115d5602f913960400191505060405180910390fd5b60028054600354600480546040805160206101006001851615026000190190931696909604601f81018390048302870183019091528086529394600094610c28948a948a9467ffffffffffffffff90921693929091830182828015610c1d5780601f10610bf257610100808354040283529160200191610c1d565b820191906000526020600020905b815481529060010190602001808311610c0057829003601f168201915b5050505050876111d0565b600081815260076020526040902060028281558101546001559091505b828214610d8557506000818152600760209081526040808320600181015460069093529220549092908015610d07576005546000199182019181018214610cd357600060056001830381548110610c9857fe5b906000526020600020015490508060058481548110610cb357fe5b600091825260208083209091019290925591825260069052604090208290555b6005805480610cde57fe5b600082815260208082208301600019908101839055909201909255848252600690526040812055505b826006015483600201541415610d7e57600583015460009081526007602052604090206003805467ffffffffffffffff198116600167ffffffffffffffff92831681019092161782559082018054610d759260049291600261010092821615929092026000190116046114c4565b50505050610d85565b5050610c45565b505050505050565b600080610d98611454565b6000806000610da687611312565b9398509196509450925090506000856006811115610dc057fe5b14610dd3576000955050505050506107f8565b604084015181148015610e27576005805486516001820180845560009384527f036b6384b5eca791c62761152d0c79bb0604c104a5fb6f4eb0703f3154bb3db0909201558651825260066020526040909120555b6040805160e0810182526001808252602088810151818401908152898501518486019081526080808c01516060870190815267ffffffffffffffff8c169187019190915260a086018a905260c086018990528b51600090815260078552969096208551815460ff1916901515178155915193820193909355915160028301559251805192939192610ebe9260038501920190611549565b50608082015160048201805467ffffffffffffffff191667ffffffffffffffff90921691909117905560a0820151600582015560c090910151600690910155935160005550509015949350505050565b610f16611454565b60008060008060008651602088016040516020810160208101602081016020810160a08588886010600019fa610f4b57600080fd5b84519b5083519a508251995081519850805197505050505050505060608167ffffffffffffffff81118015610f7f57600080fd5b506040519080825280601f01601f191660200182016040528015610faa576020820181803683370190505b5090508115610fd45787516020890160208301848184846011600019fa610fd057600080fd5b5050505b6040805160a081018252968752602087019590955293850192909252606084015250608082015292915050565b600061100b611454565b835160009081526007
60205260408120548190819060ff161561103d5750600193508592506000915081905080611167565b60015487604001511161105f5750600293508592506000915081905080611167565b8551158061107857506001876040015103866040015114155b156110925750600393508592506000915081905080611167565b60c0860151158015906110ac575085604001518660c00151145b156110d3578660200151600254146110d35750600493508592506000915081905080611167565b60808087015160a088015160c0890151928a01515191929091156111585767ffffffffffffffff838116141561111d57506005965088955060009450849350839250611167915050565b8960400151811061114257506006965088955060009450849350839250611167915050565b50508751606089015160408a0151600190930192015b60009750899650919450925090505b9295509295909350565b611179611489565b506040805160e08101825260018082528451602083015293820151909301908301526060818101519083015260808082015167ffffffffffffffff169083015260a0808201519083015260c0908101519082015290565b600060608686868686604051602001808681526020018581526020018467ffffffffffffffff1667ffffffffffffffff1681526020018060200180602001838103835285818151815260200191508051906020019080838360005b8381101561124357818101518382015260200161122b565b50505050905090810190601f1680156112705780820380516001836020036101000a031916815260200191505b50838103825284518152845160209182019186019080838360005b838110156112a357818101518382015260200161128b565b50505050905090810190601f1680156112d05780820380516001836020036101000a031916815260200191505b50975050505050505050604051602081830303815290604052905080516020820160008083836012600019fa61130557600080fd5b5095979650505050505050565b600061131c611454565b6000806000611329611454565b61133287610f0e565b905061133c611489565b602082810151600090815260078252604090819020815160e081018352815460ff1615158152600180830154828601526002808401548386015260038401805486516101009482161594909402600019011691909104601f81018790048702830187019095528482529194929360608601939192918301828280156114025780601f106113d757610100808354040283529160200191611402565b820191906000526020600020905b8154815290600101906020018083116113e557829003601f168201915b5050509183525050600482015467ffffffffffffffff1660208201526005820154604082015260069091015460609091015290506114408282611001565b939c929b5090995097509095509350505050565b6040518060a0016040528060008019168152602001600080191681526020016000815260200160008152602001606081525090565b6040805160e0810182526000808252602082018190529181018290526060808201526080810182905260a0810182905260c081019190915290565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f106114fd5780548555611539565b8280016001018555821561153957600052602060002091601f016020900482015b8281111561153957825482559160010191906001019061151e565b506115459291506115b7565b5090565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f1061158a57805160ff1916838001178555611539565b82800160010185558215611539579182015b8281111561153957825182559160200191906001019061159c565b6115d191905b8082111561154557600081556001016115bd565b9056fe4d697373696e672066696e616c69747920746172676574206865616465722066726f6d207468652073746f72616765a2646970667358221220edcaec08f93f74ce5be00b81da5d6b2276138571a33f1cfdca50e5047f854e6e64736f6c63430006060033 \ No newline at end of file diff --git a/bridges/relays/bin-ethereum/res/substrate-bridge-metadata.txt b/bridges/relays/bin-ethereum/res/substrate-bridge-metadata.txt deleted file mode 100644 index 13b7daa9a8b8..000000000000 --- a/bridges/relays/bin-ethereum/res/substrate-bridge-metadata.txt +++ /dev/null @@ -1,5 +0,0 @@ -Last Change Date: 2020-07-30 -Solc version: 0.6.6+commit.6c089d02.Linux.g++ -Source 
hash (keccak256): 0xea5d6d744f69157adc2857166792aca139c0b5b186ba89c1011358fbcad90d7e -Source gist: https://github.com/svyatonik/substrate-bridge-sol/blob/6456d3e016c95cd5e6d5e817c23e9e69e739aa78/substrate-bridge.sol -Compiler flags used (command to produce the file): `docker run -i ethereum/solc:0.6.6 --optimize --bin - < substrate-bridge.sol` \ No newline at end of file diff --git a/bridges/relays/bin-ethereum/src/cli.yml b/bridges/relays/bin-ethereum/src/cli.yml deleted file mode 100644 index 78971787c0e2..000000000000 --- a/bridges/relays/bin-ethereum/src/cli.yml +++ /dev/null @@ -1,166 +0,0 @@ -name: ethsub-bridge -version: "0.1.0" -author: Parity Technologies -about: Parity Ethereum (PoA) <-> Substrate bridge -subcommands: - - eth-to-sub: - about: Synchronize headers from Ethereum node to Substrate node. - args: - - eth-host: ð-host - long: eth-host - value_name: ETH_HOST - help: Connect to Ethereum node websocket server at given host. - takes_value: true - - eth-port: ð-port - long: eth-port - value_name: ETH_PORT - help: Connect to Ethereum node websocket server at given port. - takes_value: true - - sub-host: &sub-host - long: sub-host - value_name: SUB_HOST - help: Connect to Substrate node websocket server at given host. - takes_value: true - - sub-port: &sub-port - long: sub-port - value_name: SUB_PORT - help: Connect to Substrate node websocket server at given port. - takes_value: true - - sub-tx-mode: - long: sub-tx-mode - value_name: MODE - help: Submit headers using signed (default) or unsigned transactions. Third mode - backup - submits signed transactions only when we believe that sync has stalled. - takes_value: true - possible_values: - - signed - - unsigned - - backup - - sub-signer: &sub-signer - long: sub-signer - value_name: SUB_SIGNER - help: The SURI of secret key to use when transactions are submitted to the Substrate node. - - sub-signer-password: &sub-signer-password - long: sub-signer-password - value_name: SUB_SIGNER_PASSWORD - help: The password for the SURI of secret key to use when transactions are submitted to the Substrate node. - - sub-pallet-instance: &sub-pallet-instance - long: instance - short: i - value_name: PALLET_INSTANCE - help: The instance of the bridge pallet the relay should follow. - takes_value: true - case_insensitive: true - possible_values: - - Rialto - - Kovan - default_value: Rialto - - no-prometheus: &no-prometheus - long: no-prometheus - help: Do not expose a Prometheus metric endpoint. - - prometheus-host: &prometheus-host - long: prometheus-host - value_name: PROMETHEUS_HOST - help: Expose Prometheus endpoint at given interface. - - prometheus-port: &prometheus-port - long: prometheus-port - value_name: PROMETHEUS_PORT - help: Expose Prometheus endpoint at given port. - - sub-to-eth: - about: Synchronize headers from Substrate node to Ethereum node. - args: - - eth-host: *eth-host - - eth-port: *eth-port - - eth-contract: - long: eth-contract - value_name: ETH_CONTRACT - help: Address of deployed bridge contract. - takes_value: true - - eth-chain-id: ð-chain-id - long: eth-chain-id - value_name: ETH_CHAIN_ID - help: Chain ID to use for signing. - - eth-signer: ð-signer - long: eth-signer - value_name: ETH_SIGNER - help: Hex-encoded secret to use when transactions are submitted to the Ethereum node. - - sub-host: *sub-host - - sub-port: *sub-port - - no-prometheus: *no-prometheus - - prometheus-host: *prometheus-host - - prometheus-port: *prometheus-port - - eth-deploy-contract: - about: Deploy Bridge contract on Ethereum node. 
- args: - - eth-host: *eth-host - - eth-port: *eth-port - - eth-signer: *eth-signer - - eth-chain-id: *eth-chain-id - - eth-contract-code: - long: eth-contract-code - value_name: ETH_CONTRACT_CODE - help: Bytecode of bridge contract. - takes_value: true - - sub-host: *sub-host - - sub-port: *sub-port - - sub-authorities-set-id: - long: sub-authorities-set-id - value_name: SUB_AUTHORITIES_SET_ID - help: ID of initial GRANDPA authorities set. - takes_value: true - - sub-authorities-set: - long: sub-authorities-set - value_name: SUB_AUTHORITIES_SET - help: Encoded initial GRANDPA authorities set. - takes_value: true - - sub-initial-header: - long: sub-initial-header - value_name: SUB_INITIAL_HEADER - help: Encoded initial Substrate header. - takes_value: true - - eth-submit-exchange-tx: - about: Submit lock funds transaction to Ethereum node. - args: - - eth-host: *eth-host - - eth-port: *eth-port - - eth-nonce: - long: eth-nonce - value_name: ETH_NONCE - help: Nonce that have to be used when building transaction. If not specified, read from PoA node. - takes_value: true - - eth-signer: *eth-signer - - eth-chain-id: *eth-chain-id - - eth-amount: - long: eth-amount - value_name: ETH_AMOUNT - help: Amount of ETH to lock (in wei). - takes_value: true - - sub-recipient: - long: sub-recipient - value_name: SUB_RECIPIENT - help: Hex-encoded Public key of funds recipient in Substrate chain. - takes_value: true - - eth-exchange-sub: - about: Submit proof of PoA lock funds transaction to Substrate node. - args: - - eth-host: *eth-host - - eth-port: *eth-port - - eth-start-with-block: - long: eth-start-with-block - value_name: ETH_START_WITH_BLOCK - help: Auto-relay transactions starting with given block number. If not specified, starts with best finalized Ethereum block (known to Substrate node) transactions. - takes_value: true - conflicts_with: - - eth-tx-hash - - eth-tx-hash: - long: eth-tx-hash - value_name: ETH_TX_HASH - help: Hash of the lock funds transaction. - takes_value: true - - sub-host: *sub-host - - sub-port: *sub-port - - sub-signer: *sub-signer - - sub-signer-password: *sub-signer-password - - sub-pallet-instance: *sub-pallet-instance - - no-prometheus: *no-prometheus - - prometheus-host: *prometheus-host - - prometheus-port: *prometheus-port diff --git a/bridges/relays/bin-ethereum/src/error.rs b/bridges/relays/bin-ethereum/src/error.rs deleted file mode 100644 index 61ae2a9a498d..000000000000 --- a/bridges/relays/bin-ethereum/src/error.rs +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::rpc_errors::RpcError; -use thiserror::Error; - -/// Result type used by PoA relay. -pub type Result = std::result::Result; - -/// Ethereum PoA relay errors. -#[derive(Error, Debug)] -pub enum Error { - /// Failed to decode initial header. 
- #[error("Error decoding initial header: {0}")] - DecodeInitialHeader(codec::Error), - /// RPC error. - #[error("{0}")] - Rpc(#[from] RpcError), - /// Failed to read genesis header. - #[error("Error reading Substrate genesis header: {0:?}")] - ReadGenesisHeader(relay_substrate_client::Error), - /// Failed to read initial GRANDPA authorities. - #[error("Error reading GRANDPA authorities set: {0:?}")] - ReadAuthorities(relay_substrate_client::Error), - /// Failed to deploy bridge contract to Ethereum chain. - #[error("Error deploying contract: {0:?}")] - DeployContract(RpcError), -} diff --git a/bridges/relays/bin-ethereum/src/ethereum_client.rs b/bridges/relays/bin-ethereum/src/ethereum_client.rs deleted file mode 100644 index 75ed57fea163..000000000000 --- a/bridges/relays/bin-ethereum/src/ethereum_client.rs +++ /dev/null @@ -1,631 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::{rpc_errors::RpcError, substrate_sync_loop::QueuedRialtoHeader}; - -use async_trait::async_trait; -use bp_eth_poa::signatures::secret_to_address; -use codec::{Decode, Encode}; -use ethabi::FunctionOutputDecoder; -use headers_relay::sync_types::SubmittedHeaders; -use relay_ethereum_client::{ - sign_and_submit_transaction, - types::{Address, CallRequest, HeaderId as EthereumHeaderId, Receipt, H256, U256}, - Client as EthereumClient, Error as EthereumNodeError, SigningParams as EthereumSigningParams, -}; -use relay_rialto_client::HeaderId as RialtoHeaderId; -use relay_utils::{HeaderId, MaybeConnectionError}; -use sp_runtime::EncodedJustification; -use std::collections::HashSet; - -// to encode/decode contract calls -ethabi_contract::use_contract!(bridge_contract, "res/substrate-bridge-abi.json"); - -type RpcResult = std::result::Result; - -/// A trait which contains methods that work by using multiple low-level RPCs, or more complicated -/// interactions involving, for example, an Ethereum contract. -#[async_trait] -pub trait EthereumHighLevelRpc { - /// Returns the best Substrate block that PoA chain knows of. - async fn best_substrate_block(&self, contract_address: Address) -> RpcResult; - - /// Returns true if Substrate header is known to Ethereum node. - async fn substrate_header_known( - &self, - contract_address: Address, - id: RialtoHeaderId, - ) -> RpcResult<(RialtoHeaderId, bool)>; - - /// Submits Substrate headers to Ethereum contract. - async fn submit_substrate_headers( - &self, - params: EthereumSigningParams, - contract_address: Address, - headers: Vec, - ) -> SubmittedHeaders; - - /// Returns ids of incomplete Substrate headers. - async fn incomplete_substrate_headers( - &self, - contract_address: Address, - ) -> RpcResult>; - - /// Complete Substrate header. 
- async fn complete_substrate_header( - &self, - params: EthereumSigningParams, - contract_address: Address, - id: RialtoHeaderId, - justification: EncodedJustification, - ) -> RpcResult<RialtoHeaderId>; - - /// Submit ethereum transaction. - async fn submit_ethereum_transaction( - &self, - params: &EthereumSigningParams, - contract_address: Option<Address>
, - nonce: Option, - double_gas: bool, - encoded_call: Vec, - ) -> RpcResult<()>; - - /// Retrieve transactions receipts for given block. - async fn transaction_receipts( - &self, - id: EthereumHeaderId, - transactions: Vec, - ) -> RpcResult<(EthereumHeaderId, Vec)>; -} - -#[async_trait] -impl EthereumHighLevelRpc for EthereumClient { - async fn best_substrate_block(&self, contract_address: Address) -> RpcResult { - let (encoded_call, call_decoder) = bridge_contract::functions::best_known_header::call(); - let call_request = CallRequest { - to: Some(contract_address), - data: Some(encoded_call.into()), - ..Default::default() - }; - - let call_result = self.eth_call(call_request).await?; - let (number, raw_hash) = call_decoder.decode(&call_result.0)?; - let hash = rialto_runtime::Hash::decode(&mut &raw_hash[..])?; - - if number != number.low_u32().into() { - return Err(RpcError::Ethereum(EthereumNodeError::InvalidSubstrateBlockNumber)) - } - - Ok(HeaderId(number.low_u32(), hash)) - } - - async fn substrate_header_known( - &self, - contract_address: Address, - id: RialtoHeaderId, - ) -> RpcResult<(RialtoHeaderId, bool)> { - let (encoded_call, call_decoder) = bridge_contract::functions::is_known_header::call(id.1); - let call_request = CallRequest { - to: Some(contract_address), - data: Some(encoded_call.into()), - ..Default::default() - }; - - let call_result = self.eth_call(call_request).await?; - let is_known_block = call_decoder.decode(&call_result.0)?; - - Ok((id, is_known_block)) - } - - async fn submit_substrate_headers( - &self, - params: EthereumSigningParams, - contract_address: Address, - headers: Vec, - ) -> SubmittedHeaders { - // read nonce of signer - let address: Address = secret_to_address(¶ms.signer); - let nonce = match self.account_nonce(address).await { - Ok(nonce) => nonce, - Err(error) => - return SubmittedHeaders { - submitted: Vec::new(), - incomplete: Vec::new(), - rejected: headers.iter().rev().map(|header| header.id()).collect(), - fatal_error: Some(error.into()), - }, - }; - - // submit headers. Note that we're cloning self here. It is ok, because - // cloning `jsonrpsee::Client` only clones reference to background threads - submit_substrate_headers( - EthereumHeadersSubmitter { client: self.clone(), params, contract_address, nonce }, - headers, - ) - .await - } - - async fn incomplete_substrate_headers( - &self, - contract_address: Address, - ) -> RpcResult> { - let (encoded_call, call_decoder) = bridge_contract::functions::incomplete_headers::call(); - let call_request = CallRequest { - to: Some(contract_address), - data: Some(encoded_call.into()), - ..Default::default() - }; - - let call_result = self.eth_call(call_request).await?; - - // Q: Is is correct to call these "incomplete_ids"? 
- let (incomplete_headers_numbers, incomplete_headers_hashes) = - call_decoder.decode(&call_result.0)?; - let incomplete_ids = incomplete_headers_numbers - .into_iter() - .zip(incomplete_headers_hashes) - .filter_map(|(number, hash)| { - if number != number.low_u32().into() { - return None - } - - Some(HeaderId(number.low_u32(), hash)) - }) - .collect(); - - Ok(incomplete_ids) - } - - async fn complete_substrate_header( - &self, - params: EthereumSigningParams, - contract_address: Address, - id: RialtoHeaderId, - justification: EncodedJustification, - ) -> RpcResult<RialtoHeaderId> { - let _ = self - .submit_ethereum_transaction( - &params, - Some(contract_address), - None, - false, - bridge_contract::functions::import_finality_proof::encode_input( - id.0, - id.1, - justification, - ), - ) - .await?; - - Ok(id) - } - - async fn submit_ethereum_transaction( - &self, - params: &EthereumSigningParams, - contract_address: Option<Address>
, - nonce: Option, - double_gas: bool, - encoded_call: Vec, - ) -> RpcResult<()> { - sign_and_submit_transaction(self, params, contract_address, nonce, double_gas, encoded_call) - .await - .map_err(Into::into) - } - - async fn transaction_receipts( - &self, - id: EthereumHeaderId, - transactions: Vec, - ) -> RpcResult<(EthereumHeaderId, Vec)> { - let mut transaction_receipts = Vec::with_capacity(transactions.len()); - for transaction in transactions { - let transaction_receipt = self.transaction_receipt(transaction).await?; - transaction_receipts.push(transaction_receipt); - } - Ok((id, transaction_receipts)) - } -} - -/// Max number of headers which can be sent to Solidity contract. -pub const HEADERS_BATCH: usize = 4; - -/// Substrate headers to send to the Ethereum light client. -/// -/// The Solidity contract can only accept a fixed number of headers in one go. -/// This struct is meant to encapsulate this limitation. -#[derive(Debug)] -#[cfg_attr(test, derive(Clone))] -pub struct HeadersBatch { - pub header1: QueuedRialtoHeader, - pub header2: Option, - pub header3: Option, - pub header4: Option, -} - -impl HeadersBatch { - /// Create new headers from given header & ids collections. - /// - /// This method will pop `HEADERS_BATCH` items from both collections - /// and construct `Headers` object and a vector of `RialtoHeaderId`s. - pub fn pop_from( - headers: &mut Vec, - ids: &mut Vec, - ) -> Result<(Self, Vec), ()> { - if headers.len() != ids.len() { - log::error!(target: "bridge", "Collection size mismatch ({} vs {})", headers.len(), ids.len()); - return Err(()) - } - - let header1 = headers.pop().ok_or(())?; - let header2 = headers.pop(); - let header3 = headers.pop(); - let header4 = headers.pop(); - - let mut submitting_ids = Vec::with_capacity(HEADERS_BATCH); - for _ in 0..HEADERS_BATCH { - submitting_ids.extend(ids.pop().iter()); - } - - Ok((Self { header1, header2, header3, header4 }, submitting_ids)) - } - - /// Returns unified array of headers. - /// - /// The first element is always `Some`. - fn headers(&self) -> [Option<&QueuedRialtoHeader>; HEADERS_BATCH] { - [Some(&self.header1), self.header2.as_ref(), self.header3.as_ref(), self.header4.as_ref()] - } - - /// Encodes all headers. If header is not present an empty vector will be returned. - pub fn encode(&self) -> [Vec; HEADERS_BATCH] { - let encode = |h: &QueuedRialtoHeader| h.header().encode(); - let headers = self.headers(); - [ - headers[0].map(encode).unwrap_or_default(), - headers[1].map(encode).unwrap_or_default(), - headers[2].map(encode).unwrap_or_default(), - headers[3].map(encode).unwrap_or_default(), - ] - } - /// Returns number of contained headers. - pub fn len(&self) -> usize { - let is_set = |h: &Option<&QueuedRialtoHeader>| if h.is_some() { 1 } else { 0 }; - self.headers().iter().map(is_set).sum() - } - - /// Remove headers starting from `idx` (0-based) from this collection. - /// - /// The collection will be left with `[0, idx)` headers. - /// Returns `Err` when `idx == 0`, since `Headers` must contain at least one header, - /// or when `idx > HEADERS_BATCH`. - pub fn split_off(&mut self, idx: usize) -> Result<(), ()> { - if idx == 0 || idx > HEADERS_BATCH { - return Err(()) - } - let mut vals: [_; HEADERS_BATCH] = - [&mut None, &mut self.header2, &mut self.header3, &mut self.header4]; - for val in vals.iter_mut().skip(idx) { - **val = None; - } - Ok(()) - } -} - -/// Substrate headers submitter API. 
-#[async_trait] -trait HeadersSubmitter { - /// Returns Ok(0) if all given not-yet-imported headers are complete. - /// Returns Ok(index != 0) where index is 1-based index of first header that is incomplete. - /// - /// Returns Err(()) if contract has rejected headers. This means that the contract is - /// unable to import first header (e.g. it may already be imported). - async fn is_headers_incomplete(&self, headers: &HeadersBatch) -> RpcResult; - - /// Submit given headers to Ethereum node. - async fn submit_headers(&mut self, headers: HeadersBatch) -> RpcResult<()>; -} - -/// Implementation of Substrate headers submitter that sends headers to running Ethereum node. -struct EthereumHeadersSubmitter { - client: EthereumClient, - params: EthereumSigningParams, - contract_address: Address, - nonce: U256, -} - -#[async_trait] -impl HeadersSubmitter for EthereumHeadersSubmitter { - async fn is_headers_incomplete(&self, headers: &HeadersBatch) -> RpcResult { - let [h1, h2, h3, h4] = headers.encode(); - let (encoded_call, call_decoder) = - bridge_contract::functions::is_incomplete_headers::call(h1, h2, h3, h4); - let call_request = CallRequest { - to: Some(self.contract_address), - data: Some(encoded_call.into()), - ..Default::default() - }; - - let call_result = self.client.eth_call(call_request).await?; - let incomplete_index: U256 = call_decoder.decode(&call_result.0)?; - if incomplete_index > HEADERS_BATCH.into() { - return Err(RpcError::Ethereum(EthereumNodeError::InvalidIncompleteIndex)) - } - - Ok(incomplete_index.low_u32() as _) - } - - async fn submit_headers(&mut self, headers: HeadersBatch) -> RpcResult<()> { - let [h1, h2, h3, h4] = headers.encode(); - let result = self - .client - .submit_ethereum_transaction( - &self.params, - Some(self.contract_address), - Some(self.nonce), - false, - bridge_contract::functions::import_headers::encode_input(h1, h2, h3, h4), - ) - .await; - - if result.is_ok() { - self.nonce += U256::one(); - } - - result - } -} - -/// Submit multiple Substrate headers. -async fn submit_substrate_headers( - mut header_submitter: impl HeadersSubmitter, - mut headers: Vec, -) -> SubmittedHeaders { - let mut submitted_headers = SubmittedHeaders::default(); - - let mut ids = headers.iter().map(|header| header.id()).rev().collect::>(); - headers.reverse(); - - while !headers.is_empty() { - let (headers, submitting_ids) = HeadersBatch::pop_from(&mut headers, &mut ids) - .expect("Headers and ids are not empty; qed"); - - submitted_headers.fatal_error = submit_substrate_headers_batch( - &mut header_submitter, - &mut submitted_headers, - submitting_ids, - headers, - ) - .await; - - if submitted_headers.fatal_error.is_some() { - ids.reverse(); - submitted_headers.rejected.extend(ids); - break - } - } - - submitted_headers -} - -/// Submit 4 Substrate headers in single PoA transaction. 
-async fn submit_substrate_headers_batch( - header_submitter: &mut impl HeadersSubmitter, - submitted_headers: &mut SubmittedHeaders, - mut ids: Vec, - mut headers: HeadersBatch, -) -> Option { - debug_assert_eq!(ids.len(), headers.len(),); - - // if parent of first header is either incomplete, or rejected, we assume that contract - // will reject this header as well - let parent_id = headers.header1.parent_id(); - if submitted_headers.rejected.contains(&parent_id) || - submitted_headers.incomplete.contains(&parent_id) - { - submitted_headers.rejected.extend(ids); - return None - } - - // check if headers are incomplete - let incomplete_header_index = match header_submitter.is_headers_incomplete(&headers).await { - // All headers valid - Ok(0) => None, - Ok(incomplete_header_index) => Some(incomplete_header_index), - Err(error) => { - // contract has rejected all headers => we do not want to submit it - submitted_headers.rejected.extend(ids); - if error.is_connection_error() { - return Some(error) - } else { - return None - } - }, - }; - - // Modify `ids` and `headers` to only contain values that are going to be accepted. - let rejected = if let Some(idx) = incomplete_header_index { - let len = std::cmp::min(idx, ids.len()); - headers - .split_off(len) - .expect("len > 0, the case where all headers are valid is converted to None; qed"); - ids.split_off(len) - } else { - Vec::new() - }; - let submitted = ids; - let submit_result = header_submitter.submit_headers(headers).await; - match submit_result { - Ok(_) => { - if incomplete_header_index.is_some() { - submitted_headers.incomplete.extend(submitted.iter().last().cloned()); - } - submitted_headers.submitted.extend(submitted); - submitted_headers.rejected.extend(rejected); - None - }, - Err(error) => { - submitted_headers.rejected.extend(submitted); - submitted_headers.rejected.extend(rejected); - Some(error) - }, - } -} - -#[cfg(test)] -mod tests { - use super::*; - use sp_runtime::traits::Header; - - struct TestHeadersSubmitter { - incomplete: Vec, - failed: Vec, - } - - #[async_trait] - impl HeadersSubmitter for TestHeadersSubmitter { - async fn is_headers_incomplete(&self, headers: &HeadersBatch) -> RpcResult { - if self.incomplete.iter().any(|i| i.0 == headers.header1.id().0) { - Ok(1) - } else { - Ok(0) - } - } - - async fn submit_headers(&mut self, headers: HeadersBatch) -> RpcResult<()> { - if self.failed.iter().any(|i| i.0 == headers.header1.id().0) { - Err(RpcError::Ethereum(EthereumNodeError::InvalidSubstrateBlockNumber)) - } else { - Ok(()) - } - } - } - - fn header(number: rialto_runtime::BlockNumber) -> QueuedRialtoHeader { - QueuedRialtoHeader::new( - rialto_runtime::Header::new( - number, - Default::default(), - Default::default(), - if number == 0 { Default::default() } else { header(number - 1).id().1 }, - Default::default(), - ) - .into(), - ) - } - - #[test] - fn descendants_of_incomplete_headers_are_not_submitted() { - let submitted_headers = async_std::task::block_on(submit_substrate_headers( - TestHeadersSubmitter { incomplete: vec![header(5).id()], failed: vec![] }, - vec![header(5), header(6)], - )); - assert_eq!(submitted_headers.submitted, vec![header(5).id()]); - assert_eq!(submitted_headers.incomplete, vec![header(5).id()]); - assert_eq!(submitted_headers.rejected, vec![header(6).id()]); - assert!(submitted_headers.fatal_error.is_none()); - } - - #[test] - fn headers_after_fatal_error_are_not_submitted() { - let submitted_headers = async_std::task::block_on(submit_substrate_headers( - TestHeadersSubmitter { 
incomplete: vec![], failed: vec![header(9).id()] }, - vec![header(5), header(6), header(7), header(8), header(9), header(10), header(11)], - )); - assert_eq!( - submitted_headers.submitted, - vec![header(5).id(), header(6).id(), header(7).id(), header(8).id()] - ); - assert_eq!(submitted_headers.incomplete, vec![]); - assert_eq!( - submitted_headers.rejected, - vec![header(9).id(), header(10).id(), header(11).id(),] - ); - assert!(submitted_headers.fatal_error.is_some()); - } - - fn headers_batch() -> HeadersBatch { - let mut init_headers = vec![header(1), header(2), header(3), header(4), header(5)]; - init_headers.reverse(); - let mut init_ids = init_headers.iter().map(|h| h.id()).collect(); - let (headers, ids) = HeadersBatch::pop_from(&mut init_headers, &mut init_ids).unwrap(); - assert_eq!(init_headers, vec![header(5)]); - assert_eq!(init_ids, vec![header(5).id()]); - assert_eq!(ids, vec![header(1).id(), header(2).id(), header(3).id(), header(4).id()]); - headers - } - - #[test] - fn headers_batch_len() { - let headers = headers_batch(); - assert_eq!(headers.len(), 4); - } - - #[test] - fn headers_batch_encode() { - let headers = headers_batch(); - assert_eq!( - headers.encode(), - [ - header(1).header().encode(), - header(2).header().encode(), - header(3).header().encode(), - header(4).header().encode(), - ] - ); - } - - #[test] - fn headers_batch_split_off() { - // given - let mut headers = headers_batch(); - - // when - assert!(headers.split_off(0).is_err()); - assert_eq!(headers.header1, header(1)); - assert!(headers.header2.is_some()); - assert!(headers.header3.is_some()); - assert!(headers.header4.is_some()); - - // when - let mut h = headers.clone(); - h.split_off(1).unwrap(); - assert!(h.header2.is_none()); - assert!(h.header3.is_none()); - assert!(h.header4.is_none()); - - // when - let mut h = headers.clone(); - h.split_off(2).unwrap(); - assert!(h.header2.is_some()); - assert!(h.header3.is_none()); - assert!(h.header4.is_none()); - - // when - let mut h = headers.clone(); - h.split_off(3).unwrap(); - assert!(h.header2.is_some()); - assert!(h.header3.is_some()); - assert!(h.header4.is_none()); - - // when - let mut h = headers; - h.split_off(4).unwrap(); - assert!(h.header2.is_some()); - assert!(h.header3.is_some()); - assert!(h.header4.is_some()); - } -} diff --git a/bridges/relays/bin-ethereum/src/ethereum_deploy_contract.rs b/bridges/relays/bin-ethereum/src/ethereum_deploy_contract.rs deleted file mode 100644 index 76a75b062ecb..000000000000 --- a/bridges/relays/bin-ethereum/src/ethereum_deploy_contract.rs +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -use crate::{ - error::{Error, Result}, - ethereum_client::{bridge_contract, EthereumHighLevelRpc}, - rpc_errors::RpcError, -}; - -use codec::{Decode, Encode}; -use num_traits::Zero; -use relay_ethereum_client::{ - Client as EthereumClient, ConnectionParams as EthereumConnectionParams, - SigningParams as EthereumSigningParams, -}; -use relay_rialto_client::{HeaderId as RialtoHeaderId, Rialto}; -use relay_substrate_client::{ - Client as SubstrateClient, ConnectionParams as SubstrateConnectionParams, - OpaqueGrandpaAuthoritiesSet, -}; -use relay_utils::HeaderId; - -/// Ethereum synchronization parameters. -#[derive(Debug)] -pub struct EthereumDeployContractParams { - /// Ethereum connection params. - pub eth_params: EthereumConnectionParams, - /// Ethereum signing params. - pub eth_sign: EthereumSigningParams, - /// Ethereum contract bytecode. - pub eth_contract_code: Vec, - /// Substrate connection params. - pub sub_params: SubstrateConnectionParams, - /// Initial authorities set id. - pub sub_initial_authorities_set_id: Option, - /// Initial authorities set. - pub sub_initial_authorities_set: Option>, - /// Initial header. - pub sub_initial_header: Option>, -} - -/// Deploy Bridge contract on Ethereum chain. -pub async fn run(params: EthereumDeployContractParams) { - let EthereumDeployContractParams { - eth_params, - eth_sign, - sub_params, - sub_initial_authorities_set_id, - sub_initial_authorities_set, - sub_initial_header, - eth_contract_code, - } = params; - - let result = async move { - let eth_client = EthereumClient::try_connect(eth_params).await.map_err(RpcError::Ethereum)?; - let sub_client = SubstrateClient::::try_connect(sub_params).await.map_err(RpcError::Substrate)?; - - let (initial_header_id, initial_header) = prepare_initial_header(&sub_client, sub_initial_header).await?; - let initial_set_id = sub_initial_authorities_set_id.unwrap_or(0); - let initial_set = prepare_initial_authorities_set( - &sub_client, - initial_header_id.1, - sub_initial_authorities_set, - ).await?; - - log::info!( - target: "bridge", - "Deploying Ethereum contract.\r\n\tInitial header: {:?}\r\n\tInitial header id: {:?}\r\n\tInitial header encoded: {}\r\n\tInitial authorities set ID: {}\r\n\tInitial authorities set: {}", - initial_header, - initial_header_id, - hex::encode(&initial_header), - initial_set_id, - hex::encode(&initial_set), - ); - - deploy_bridge_contract( - ð_client, - ð_sign, - eth_contract_code, - initial_header, - initial_set_id, - initial_set, - ).await - }.await; - - if let Err(error) = result { - log::error!(target: "bridge", "{}", error); - } -} - -/// Prepare initial header. -async fn prepare_initial_header( - sub_client: &SubstrateClient, - sub_initial_header: Option>, -) -> Result<(RialtoHeaderId, Vec)> { - match sub_initial_header { - Some(raw_initial_header) => { - match rialto_runtime::Header::decode(&mut &raw_initial_header[..]) { - Ok(initial_header) => - Ok((HeaderId(initial_header.number, initial_header.hash()), raw_initial_header)), - Err(error) => Err(Error::DecodeInitialHeader(error)), - } - }, - None => { - let initial_header = sub_client.header_by_number(Zero::zero()).await; - initial_header - .map(|header| (HeaderId(Zero::zero(), header.hash()), header.encode())) - .map_err(Error::ReadGenesisHeader) - }, - } -} - -/// Prepare initial GRANDPA authorities set. 
-async fn prepare_initial_authorities_set( - sub_client: &SubstrateClient, - sub_initial_header_hash: rialto_runtime::Hash, - sub_initial_authorities_set: Option>, -) -> Result { - let initial_authorities_set = match sub_initial_authorities_set { - Some(initial_authorities_set) => Ok(initial_authorities_set), - None => sub_client.grandpa_authorities_set(sub_initial_header_hash).await, - }; - - initial_authorities_set.map_err(Error::ReadAuthorities) -} - -/// Deploy bridge contract to Ethereum chain. -async fn deploy_bridge_contract( - eth_client: &EthereumClient, - params: &EthereumSigningParams, - contract_code: Vec, - initial_header: Vec, - initial_set_id: u64, - initial_authorities: Vec, -) -> Result<()> { - eth_client - .submit_ethereum_transaction( - params, - None, - None, - false, - bridge_contract::constructor( - contract_code, - initial_header, - initial_set_id, - initial_authorities, - ), - ) - .await - .map_err(Error::DeployContract) -} diff --git a/bridges/relays/bin-ethereum/src/ethereum_exchange.rs b/bridges/relays/bin-ethereum/src/ethereum_exchange.rs deleted file mode 100644 index 90d9a23835d4..000000000000 --- a/bridges/relays/bin-ethereum/src/ethereum_exchange.rs +++ /dev/null @@ -1,402 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Relaying proofs of PoA -> Substrate exchange transactions. - -use crate::{ - instances::BridgeInstance, - rialto_client::{SubmitEthereumExchangeTransactionProof, SubstrateHighLevelRpc}, - rpc_errors::RpcError, - substrate_types::into_substrate_ethereum_receipt, -}; - -use async_trait::async_trait; -use bp_currency_exchange::MaybeLockFundsTransaction; -use exchange_relay::{ - exchange::{ - relay_single_transaction_proof, SourceBlock, SourceClient, SourceTransaction, TargetClient, - TransactionProofPipeline, - }, - exchange_loop::{run as run_loop, InMemoryStorage}, -}; -use relay_ethereum_client::{ - types::{ - HeaderId as EthereumHeaderId, HeaderWithTransactions as EthereumHeaderWithTransactions, - Transaction as EthereumTransaction, TransactionHash as EthereumTransactionHash, H256, - HEADER_ID_PROOF, - }, - Client as EthereumClient, ConnectionParams as EthereumConnectionParams, -}; -use relay_rialto_client::{Rialto, SigningParams as RialtoSigningParams}; -use relay_substrate_client::{ - Chain as SubstrateChain, Client as SubstrateClient, - ConnectionParams as SubstrateConnectionParams, -}; -use relay_utils::{metrics::MetricsParams, relay_loop::Client as RelayClient, HeaderId}; -use rialto_runtime::exchange::EthereumTransactionInclusionProof; -use std::{sync::Arc, time::Duration}; - -/// Interval at which we ask Ethereum node for updates. -const ETHEREUM_TICK_INTERVAL: Duration = Duration::from_secs(10); - -/// Exchange relay mode. -#[derive(Debug)] -pub enum ExchangeRelayMode { - /// Relay single transaction and quit. 
- Single(EthereumTransactionHash), - /// Auto-relay transactions starting with given block. - Auto(Option), -} - -/// PoA exchange transaction relay params. -pub struct EthereumExchangeParams { - /// Ethereum connection params. - pub eth_params: EthereumConnectionParams, - /// Substrate connection params. - pub sub_params: SubstrateConnectionParams, - /// Substrate signing params. - pub sub_sign: RialtoSigningParams, - /// Relay working mode. - pub mode: ExchangeRelayMode, - /// Metrics parameters. - pub metrics_params: MetricsParams, - /// Instance of the bridge pallet being synchronized. - pub instance: Arc, -} - -impl std::fmt::Debug for EthereumExchangeParams { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - f.debug_struct("EthereumExchangeParams") - .field("eth_params", &self.eth_params) - .field("sub_params", &self.sub_params) - .field("sub_sign", &sp_core::Pair::public(&self.sub_sign)) - .field("mode", &self.mode) - .field("metrics_params", &self.metrics_params) - .field("instance", &self.instance) - .finish() - } -} - -/// Ethereum to Substrate exchange pipeline. -struct EthereumToSubstrateExchange; - -impl TransactionProofPipeline for EthereumToSubstrateExchange { - const SOURCE_NAME: &'static str = "Ethereum"; - const TARGET_NAME: &'static str = "Substrate"; - - type Block = EthereumSourceBlock; - type TransactionProof = EthereumTransactionInclusionProof; -} - -/// Ethereum source block. -struct EthereumSourceBlock(EthereumHeaderWithTransactions); - -impl SourceBlock for EthereumSourceBlock { - type Hash = H256; - type Number = u64; - type Transaction = EthereumSourceTransaction; - - fn id(&self) -> EthereumHeaderId { - HeaderId( - self.0.number.expect(HEADER_ID_PROOF).as_u64(), - self.0.hash.expect(HEADER_ID_PROOF), - ) - } - - fn transactions(&self) -> Vec { - self.0.transactions.iter().cloned().map(EthereumSourceTransaction).collect() - } -} - -/// Ethereum source transaction. -struct EthereumSourceTransaction(EthereumTransaction); - -impl SourceTransaction for EthereumSourceTransaction { - type Hash = EthereumTransactionHash; - - fn hash(&self) -> Self::Hash { - self.0.hash - } -} - -/// Ethereum node as transactions proof source. -#[derive(Clone)] -struct EthereumTransactionsSource { - client: EthereumClient, -} - -#[async_trait] -impl RelayClient for EthereumTransactionsSource { - type Error = RpcError; - - async fn reconnect(&mut self) -> Result<(), RpcError> { - self.client.reconnect().await.map_err(Into::into) - } -} - -#[async_trait] -impl SourceClient for EthereumTransactionsSource { - async fn tick(&self) { - async_std::task::sleep(ETHEREUM_TICK_INTERVAL).await; - } - - async fn block_by_hash(&self, hash: H256) -> Result { - self.client - .header_by_hash_with_transactions(hash) - .await - .map(EthereumSourceBlock) - .map_err(Into::into) - } - - async fn block_by_number(&self, number: u64) -> Result { - self.client - .header_by_number_with_transactions(number) - .await - .map(EthereumSourceBlock) - .map_err(Into::into) - } - - async fn transaction_block( - &self, - hash: &EthereumTransactionHash, - ) -> Result, RpcError> { - let eth_tx = match self.client.transaction_by_hash(*hash).await? 
{ - Some(eth_tx) => eth_tx, - None => return Ok(None), - }; - - // we need transaction to be mined => check if it is included in the block - let (eth_header_id, eth_tx_index) = - match (eth_tx.block_number, eth_tx.block_hash, eth_tx.transaction_index) { - (Some(block_number), Some(block_hash), Some(transaction_index)) => - (HeaderId(block_number.as_u64(), block_hash), transaction_index.as_u64() as _), - _ => return Ok(None), - }; - - Ok(Some((eth_header_id, eth_tx_index))) - } - - async fn transaction_proof( - &self, - block: &EthereumSourceBlock, - tx_index: usize, - ) -> Result { - const TRANSACTION_HAS_RAW_FIELD_PROOF: &str = - "RPC level checks that transactions from Ethereum\ - node are having `raw` field; qed"; - const BLOCK_HAS_HASH_FIELD_PROOF: &str = - "RPC level checks that block has `hash` field; qed"; - - let mut transaction_proof = Vec::with_capacity(block.0.transactions.len()); - for tx in &block.0.transactions { - let raw_tx_receipt = self - .client - .transaction_receipt(tx.hash) - .await - .map(|receipt| into_substrate_ethereum_receipt(&receipt)) - .map(|receipt| receipt.rlp())?; - let raw_tx = tx.raw.clone().expect(TRANSACTION_HAS_RAW_FIELD_PROOF).0; - transaction_proof.push((raw_tx, raw_tx_receipt)); - } - - Ok(EthereumTransactionInclusionProof { - block: block.0.hash.expect(BLOCK_HAS_HASH_FIELD_PROOF), - index: tx_index as _, - proof: transaction_proof, - }) - } -} - -/// Substrate node as transactions proof target. -#[derive(Clone)] -struct SubstrateTransactionsTarget { - client: SubstrateClient, - sign_params: RialtoSigningParams, - bridge_instance: Arc, -} - -#[async_trait] -impl RelayClient for SubstrateTransactionsTarget { - type Error = RpcError; - - async fn reconnect(&mut self) -> Result<(), RpcError> { - Ok(self.client.reconnect().await?) - } -} - -#[async_trait] -impl TargetClient for SubstrateTransactionsTarget { - async fn tick(&self) { - async_std::task::sleep(Rialto::AVERAGE_BLOCK_INTERVAL).await; - } - - async fn is_header_known(&self, id: &EthereumHeaderId) -> Result { - self.client.ethereum_header_known(*id).await - } - - async fn is_header_finalized(&self, id: &EthereumHeaderId) -> Result { - // we check if header is finalized by simple comparison of the header number and - // number of best finalized PoA header known to Substrate node. 
- // - // this may lead to failure in tx proof import if PoA reorganization has happened - // after we have checked that our tx has been included into given block - // - // the fix is easy, but since this code is mostly developed for demonstration purposes, - // I'm leaving this KISS-based design here - let best_finalized_ethereum_block = self.client.best_ethereum_finalized_block().await?; - Ok(id.0 <= best_finalized_ethereum_block.0) - } - - async fn best_finalized_header_id(&self) -> Result { - // we can't continue to relay exchange proofs if Substrate node is out of sync, because - // it may have already received (some of) proofs that we're going to relay - self.client.ensure_synced().await?; - - self.client.best_ethereum_finalized_block().await - } - - async fn filter_transaction_proof( - &self, - proof: &EthereumTransactionInclusionProof, - ) -> Result { - // let's try to parse transaction locally - let (raw_tx, raw_tx_receipt) = &proof.proof[proof.index as usize]; - let parse_result = rialto_runtime::exchange::EthTransaction::parse(raw_tx); - if parse_result.is_err() { - return Ok(false) - } - - // now let's check if transaction is successful - match bp_eth_poa::Receipt::is_successful_raw_receipt(raw_tx_receipt) { - Ok(true) => (), - _ => return Ok(false), - } - - // seems that transaction is relayable - let's check if runtime is able to import it - // (we can't if e.g. header is pruned or there's some issue with tx data) - self.client.verify_exchange_transaction_proof(proof.clone()).await - } - - async fn submit_transaction_proof( - &self, - proof: EthereumTransactionInclusionProof, - ) -> Result<(), RpcError> { - let (sign_params, bridge_instance) = - (self.sign_params.clone(), self.bridge_instance.clone()); - self.client - .submit_exchange_transaction_proof(sign_params, bridge_instance, proof) - .await - } -} - -/// Relay exchange transaction proof(s) to Substrate node. -pub async fn run(params: EthereumExchangeParams) { - match params.mode { - ExchangeRelayMode::Single(eth_tx_hash) => { - let result = run_single_transaction_relay(params, eth_tx_hash).await; - match result { - Ok(_) => log::info!( - target: "bridge", - "Ethereum transaction {} proof has been successfully submitted to Substrate node", - eth_tx_hash, - ), - Err(err) => log::error!( - target: "bridge", - "Error submitting Ethereum transaction {} proof to Substrate node: {}", - eth_tx_hash, - err, - ), - } - }, - ExchangeRelayMode::Auto(eth_start_with_block_number) => { - let result = - run_auto_transactions_relay_loop(params, eth_start_with_block_number).await; - if let Err(err) = result { - log::error!( - target: "bridge", - "Error auto-relaying Ethereum transactions proofs to Substrate node: {}", - err, - ); - } - }, - } -} - -/// Run single transaction proof relay and stop. -async fn run_single_transaction_relay( - params: EthereumExchangeParams, - eth_tx_hash: H256, -) -> anyhow::Result<()> { - let EthereumExchangeParams { eth_params, sub_params, sub_sign, instance, .. 
} = params; - - let eth_client = EthereumClient::try_connect(eth_params).await.map_err(RpcError::Ethereum)?; - let sub_client = SubstrateClient::::try_connect(sub_params) - .await - .map_err(RpcError::Substrate)?; - - let source = EthereumTransactionsSource { client: eth_client }; - let target = SubstrateTransactionsTarget { - client: sub_client, - sign_params: sub_sign, - bridge_instance: instance, - }; - - relay_single_transaction_proof(&source, &target, eth_tx_hash) - .await - .map_err(Into::into) -} - -async fn run_auto_transactions_relay_loop( - params: EthereumExchangeParams, - eth_start_with_block_number: Option, -) -> anyhow::Result<()> { - let EthereumExchangeParams { - eth_params, sub_params, sub_sign, metrics_params, instance, .. - } = params; - - let eth_client = EthereumClient::new(eth_params).await; - let sub_client = SubstrateClient::::new(sub_params).await; - - let eth_start_with_block_number = match eth_start_with_block_number { - Some(eth_start_with_block_number) => eth_start_with_block_number, - None => - sub_client - .best_ethereum_finalized_block() - .await - .map_err(|err| { - anyhow::format_err!( - "Error retrieving best finalized Ethereum block from Substrate node: {:?}", - err - ) - })? - .0, - }; - - run_loop( - InMemoryStorage::new(eth_start_with_block_number), - EthereumTransactionsSource { client: eth_client }, - SubstrateTransactionsTarget { - client: sub_client, - sign_params: sub_sign, - bridge_instance: instance, - }, - metrics_params, - futures::future::pending(), - ) - .await?; - - Ok(()) -} diff --git a/bridges/relays/bin-ethereum/src/ethereum_exchange_submit.rs b/bridges/relays/bin-ethereum/src/ethereum_exchange_submit.rs deleted file mode 100644 index f68a21e594e0..000000000000 --- a/bridges/relays/bin-ethereum/src/ethereum_exchange_submit.rs +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Submitting Ethereum -> Substrate exchange transactions. - -use anyhow::anyhow; -use bp_eth_poa::{ - signatures::{secret_to_address, SignTransaction}, - UnsignedTransaction, -}; -use relay_ethereum_client::{ - types::{CallRequest, U256}, - Client as EthereumClient, ConnectionParams as EthereumConnectionParams, - SigningParams as EthereumSigningParams, -}; -use rialto_runtime::exchange::LOCK_FUNDS_ADDRESS; - -/// Ethereum exchange transaction params. -#[derive(Debug)] -pub struct EthereumExchangeSubmitParams { - /// Ethereum connection params. - pub eth_params: EthereumConnectionParams, - /// Ethereum signing params. - pub eth_sign: EthereumSigningParams, - /// Ethereum signer nonce. - pub eth_nonce: Option, - /// Amount of Ethereum tokens to lock. - pub eth_amount: U256, - /// Funds recipient on Substrate side. - pub sub_recipient: [u8; 32], -} - -/// Submit single Ethereum -> Substrate exchange transaction. 
-pub async fn run(params: EthereumExchangeSubmitParams) { - let EthereumExchangeSubmitParams { eth_params, eth_sign, eth_nonce, eth_amount, sub_recipient } = - params; - - let result: anyhow::Result<_> = async move { - let eth_client = EthereumClient::try_connect(eth_params) - .await - .map_err(|err| anyhow!("error connecting to Ethereum node: {:?}", err))?; - - let eth_signer_address = secret_to_address(ð_sign.signer); - let sub_recipient_encoded = sub_recipient; - let nonce = match eth_nonce { - Some(eth_nonce) => eth_nonce, - None => eth_client - .account_nonce(eth_signer_address) - .await - .map_err(|err| anyhow!("error fetching acount nonce: {:?}", err))?, - }; - let gas = eth_client - .estimate_gas(CallRequest { - from: Some(eth_signer_address), - to: Some(LOCK_FUNDS_ADDRESS.into()), - value: Some(eth_amount), - data: Some(sub_recipient_encoded.to_vec().into()), - ..Default::default() - }) - .await - .map_err(|err| anyhow!("error estimating gas requirements: {:?}", err))?; - let eth_tx_unsigned = UnsignedTransaction { - nonce, - gas_price: eth_sign.gas_price, - gas, - to: Some(LOCK_FUNDS_ADDRESS.into()), - value: eth_amount, - payload: sub_recipient_encoded.to_vec(), - }; - let eth_tx_signed = - eth_tx_unsigned.clone().sign_by(ð_sign.signer, Some(eth_sign.chain_id)); - eth_client - .submit_transaction(eth_tx_signed) - .await - .map_err(|err| anyhow!("error submitting transaction: {:?}", err))?; - - Ok(eth_tx_unsigned) - } - .await; - - match result { - Ok(eth_tx_unsigned) => { - log::info!( - target: "bridge", - "Exchange transaction has been submitted to Ethereum node: {:?}", - eth_tx_unsigned, - ); - }, - Err(err) => { - log::error!( - target: "bridge", - "Error submitting exchange transaction to Ethereum node: {}", - err, - ); - }, - } -} diff --git a/bridges/relays/bin-ethereum/src/ethereum_sync_loop.rs b/bridges/relays/bin-ethereum/src/ethereum_sync_loop.rs deleted file mode 100644 index ee5f8a4600ec..000000000000 --- a/bridges/relays/bin-ethereum/src/ethereum_sync_loop.rs +++ /dev/null @@ -1,299 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Ethereum PoA -> Rialto-Substrate synchronization. 
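The deleted loop below drives Ethereum headers into the Rialto runtime by implementing the generic `HeadersSyncPipeline` abstraction from the `headers-relay` crate. As orientation only, here is a rough sketch of that abstraction, inferred from the `EthereumHeadersSyncPipeline` impl further down; the real trait operates on `QueuedHeader` wrappers and carries additional bounds, so treat this as a simplified outline rather than the crate's actual definition.

```rust
// Simplified outline of the pipeline abstraction implemented below; inferred
// from the deleted impl, not copied from the headers-relay crate.
trait HeadersSyncPipeline {
    /// Name of the source chain ("Ethereum" for this pipeline).
    const SOURCE_NAME: &'static str;
    /// Name of the target chain ("Substrate" for this pipeline).
    const TARGET_NAME: &'static str;

    /// Header hash type on the source chain.
    type Hash;
    /// Header number type on the source chain.
    type Number;
    /// Source-chain header type relayed to the target.
    type Header;
    /// Extra data some headers need at import time (transaction receipts here).
    type Extra;
    /// Finality proof required by the target, if any (unit for PoA headers).
    type Completion;

    /// Rough size estimate, used when packing several headers into one submission.
    fn estimate_size(header: &Self::Header) -> usize;
}
```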
- -use crate::{ - ethereum_client::EthereumHighLevelRpc, - instances::BridgeInstance, - rialto_client::{SubmitEthereumHeaders, SubstrateHighLevelRpc}, - rpc_errors::RpcError, - substrate_types::{into_substrate_ethereum_header, into_substrate_ethereum_receipts}, -}; - -use async_trait::async_trait; -use codec::Encode; -use headers_relay::{ - sync::{HeadersSyncParams, TargetTransactionMode}, - sync_loop::{SourceClient, TargetClient}, - sync_types::{HeadersSyncPipeline, QueuedHeader, SourceHeader, SubmittedHeaders}, -}; -use relay_ethereum_client::{ - types::{HeaderHash, HeaderId as EthereumHeaderId, Receipt, SyncHeader as Header}, - Client as EthereumClient, ConnectionParams as EthereumConnectionParams, -}; -use relay_rialto_client::{Rialto, SigningParams as RialtoSigningParams}; -use relay_substrate_client::{ - Chain as SubstrateChain, Client as SubstrateClient, - ConnectionParams as SubstrateConnectionParams, -}; -use relay_utils::{metrics::MetricsParams, relay_loop::Client as RelayClient}; - -use std::{collections::HashSet, fmt::Debug, sync::Arc, time::Duration}; - -pub mod consts { - use super::*; - - /// Interval at which we check new Ethereum headers when we are synced/almost synced. - pub const ETHEREUM_TICK_INTERVAL: Duration = Duration::from_secs(10); - /// Max number of headers in single submit transaction. - pub const MAX_HEADERS_IN_SINGLE_SUBMIT: usize = 32; - /// Max total size of headers in single submit transaction. This only affects signed - /// submissions, when several headers are submitted at once. 4096 is the maximal **expected** - /// size of the Ethereum header + transactions receipts (if they're required). - pub const MAX_HEADERS_SIZE_IN_SINGLE_SUBMIT: usize = MAX_HEADERS_IN_SINGLE_SUBMIT * 4096; - /// Max Ethereum headers we want to have in all 'before-submitted' states. - pub const MAX_FUTURE_HEADERS_TO_DOWNLOAD: usize = 128; - /// Max Ethereum headers count we want to have in 'submitted' state. - pub const MAX_SUBMITTED_HEADERS: usize = 128; - /// Max depth of in-memory headers in all states. Past this depth they will be forgotten - /// (pruned). - pub const PRUNE_DEPTH: u32 = 4096; -} - -/// Ethereum synchronization parameters. -pub struct EthereumSyncParams { - /// Ethereum connection params. - pub eth_params: EthereumConnectionParams, - /// Substrate connection params. - pub sub_params: SubstrateConnectionParams, - /// Substrate signing params. - pub sub_sign: RialtoSigningParams, - /// Synchronization parameters. - pub sync_params: HeadersSyncParams, - /// Metrics parameters. - pub metrics_params: MetricsParams, - /// Instance of the bridge pallet being synchronized. - pub instance: Arc, -} - -impl Debug for EthereumSyncParams { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - f.debug_struct("EthereumSyncParams") - .field("eth_params", &self.eth_params) - .field("sub_params", &self.sub_params) - .field("sub_sign", &sp_core::Pair::public(&self.sub_sign)) - .field("sync_params", &self.sync_params) - .field("metrics_params", &self.metrics_params) - .field("instance", &self.instance) - .finish() - } -} - -/// Ethereum synchronization pipeline. 
-#[derive(Clone, Copy, Debug)] -#[cfg_attr(test, derive(PartialEq))] -pub struct EthereumHeadersSyncPipeline; - -impl HeadersSyncPipeline for EthereumHeadersSyncPipeline { - const SOURCE_NAME: &'static str = "Ethereum"; - const TARGET_NAME: &'static str = "Substrate"; - - type Hash = HeaderHash; - type Number = u64; - type Header = Header; - type Extra = Vec; - type Completion = (); - - fn estimate_size(source: &QueuedHeader) -> usize { - into_substrate_ethereum_header(source.header()).encode().len() + - into_substrate_ethereum_receipts(source.extra()) - .map(|extra| extra.encode().len()) - .unwrap_or(0) - } -} - -/// Queued ethereum header ID. -pub type QueuedEthereumHeader = QueuedHeader; - -/// Ethereum client as headers source. -#[derive(Clone)] -struct EthereumHeadersSource { - /// Ethereum node client. - client: EthereumClient, -} - -impl EthereumHeadersSource { - fn new(client: EthereumClient) -> Self { - Self { client } - } -} - -#[async_trait] -impl RelayClient for EthereumHeadersSource { - type Error = RpcError; - - async fn reconnect(&mut self) -> Result<(), RpcError> { - self.client.reconnect().await.map_err(Into::into) - } -} - -#[async_trait] -impl SourceClient for EthereumHeadersSource { - async fn best_block_number(&self) -> Result { - // we **CAN** continue to relay headers if Ethereum node is out of sync, because - // Substrate node may be missing headers that are already available at the Ethereum - - self.client.best_block_number().await.map_err(Into::into) - } - - async fn header_by_hash(&self, hash: HeaderHash) -> Result { - self.client.header_by_hash(hash).await.map(Into::into).map_err(Into::into) - } - - async fn header_by_number(&self, number: u64) -> Result { - self.client.header_by_number(number).await.map(Into::into).map_err(Into::into) - } - - async fn header_completion( - &self, - id: EthereumHeaderId, - ) -> Result<(EthereumHeaderId, Option<()>), RpcError> { - Ok((id, None)) - } - - async fn header_extra( - &self, - id: EthereumHeaderId, - header: QueuedEthereumHeader, - ) -> Result<(EthereumHeaderId, Vec), RpcError> { - self.client.transaction_receipts(id, header.header().transactions.clone()).await - } -} - -#[derive(Clone)] -struct SubstrateHeadersTarget { - /// Substrate node client. - client: SubstrateClient, - /// Whether we want to submit signed (true), or unsigned (false) transactions. - sign_transactions: bool, - /// Substrate signing params. - sign_params: RialtoSigningParams, - /// Bridge instance used in Ethereum to Substrate sync. - bridge_instance: Arc, -} - -impl SubstrateHeadersTarget { - fn new( - client: SubstrateClient, - sign_transactions: bool, - sign_params: RialtoSigningParams, - bridge_instance: Arc, - ) -> Self { - Self { client, sign_transactions, sign_params, bridge_instance } - } -} - -#[async_trait] -impl RelayClient for SubstrateHeadersTarget { - type Error = RpcError; - - async fn reconnect(&mut self) -> Result<(), RpcError> { - Ok(self.client.reconnect().await?) 
- } -} - -#[async_trait] -impl TargetClient for SubstrateHeadersTarget { - async fn best_header_id(&self) -> Result { - // we can't continue to relay headers if Substrate node is out of sync, because - // it may have already received (some of) headers that we're going to relay - self.client.ensure_synced().await?; - - self.client.best_ethereum_block().await - } - - async fn is_known_header( - &self, - id: EthereumHeaderId, - ) -> Result<(EthereumHeaderId, bool), RpcError> { - Ok((id, self.client.ethereum_header_known(id).await?)) - } - - async fn submit_headers( - &self, - headers: Vec, - ) -> SubmittedHeaders { - let (sign_params, bridge_instance, sign_transactions) = - (self.sign_params.clone(), self.bridge_instance.clone(), self.sign_transactions); - self.client - .submit_ethereum_headers(sign_params, bridge_instance, headers, sign_transactions) - .await - } - - async fn incomplete_headers_ids(&self) -> Result, RpcError> { - Ok(HashSet::new()) - } - - #[allow(clippy::unit_arg)] - async fn complete_header( - &self, - id: EthereumHeaderId, - _completion: (), - ) -> Result { - Ok(id) - } - - async fn requires_extra( - &self, - header: QueuedEthereumHeader, - ) -> Result<(EthereumHeaderId, bool), RpcError> { - // we can minimize number of receipts_check calls by checking header - // logs bloom here, but it may give us false positives (when authorities - // source is contract, we never need any logs) - let id = header.header().id(); - let sub_eth_header = into_substrate_ethereum_header(header.header()); - Ok((id, self.client.ethereum_receipts_required(sub_eth_header).await?)) - } -} - -/// Run Ethereum headers synchronization. -pub async fn run(params: EthereumSyncParams) -> Result<(), RpcError> { - let EthereumSyncParams { - eth_params, - sub_params, - sub_sign, - sync_params, - metrics_params, - instance, - } = params; - - let eth_client = EthereumClient::new(eth_params).await; - let sub_client = SubstrateClient::::new(sub_params).await; - - let sign_sub_transactions = match sync_params.target_tx_mode { - TargetTransactionMode::Signed | TargetTransactionMode::Backup => true, - TargetTransactionMode::Unsigned => false, - }; - - let source = EthereumHeadersSource::new(eth_client); - let target = SubstrateHeadersTarget::new(sub_client, sign_sub_transactions, sub_sign, instance); - - headers_relay::sync_loop::run( - source, - consts::ETHEREUM_TICK_INTERVAL, - target, - Rialto::AVERAGE_BLOCK_INTERVAL, - (), - sync_params, - metrics_params, - futures::future::pending(), - ) - .await - .map_err(|e| RpcError::SyncLoop(e.to_string()))?; - - Ok(()) -} diff --git a/bridges/relays/bin-ethereum/src/instances.rs b/bridges/relays/bin-ethereum/src/instances.rs deleted file mode 100644 index 74feb1da320d..000000000000 --- a/bridges/relays/bin-ethereum/src/instances.rs +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2020-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! The PoA Bridge Pallet provides a way to include multiple instances of itself in a runtime. When -//! synchronizing a Substrate chain which can include multiple instances of the bridge pallet we -//! must somehow decide which of the instances to sync. -//! -//! Note that each instance of the bridge pallet is coupled with an instance of the currency -//! exchange pallet. We must also have a way to create `Call`s for the correct currency exchange -//! instance. -//! -//! This module helps by preparing the correct `Call`s for each of the different pallet instances. - -use crate::{ - ethereum_sync_loop::QueuedEthereumHeader, - substrate_types::{into_substrate_ethereum_header, into_substrate_ethereum_receipts}, -}; - -use rialto_runtime::{exchange::EthereumTransactionInclusionProof as Proof, Call}; - -/// Interface for `Calls` which are needed to correctly sync the bridge. -/// -/// Each instance of the bridge and currency exchange pallets in the bridge runtime requires similar -/// but slightly different `Call` in order to be synchronized. -pub trait BridgeInstance: Send + Sync + std::fmt::Debug { - /// Used to build a `Call` for importing signed headers to a Substrate runtime. - fn build_signed_header_call(&self, headers: Vec) -> Call; - /// Used to build a `Call` for importing an unsigned header to a Substrate runtime. - fn build_unsigned_header_call(&self, header: QueuedEthereumHeader) -> Call; - /// Used to build a `Call` for importing peer transactions to a Substrate runtime. - fn build_currency_exchange_call(&self, proof: Proof) -> Call; -} - -/// Corresponds to the Rialto instance used in the bridge runtime. -#[derive(Default, Clone, Debug)] -pub struct RialtoPoA; - -impl BridgeInstance for RialtoPoA { - fn build_signed_header_call(&self, headers: Vec) -> Call { - let pallet_call = rialto_runtime::BridgeEthPoACall::import_signed_headers { - headers_with_receipts: headers - .into_iter() - .map(|header| { - ( - into_substrate_ethereum_header(header.header()), - into_substrate_ethereum_receipts(header.extra()), - ) - }) - .collect(), - }; - - rialto_runtime::Call::BridgeRialtoPoa(pallet_call) - } - - fn build_unsigned_header_call(&self, header: QueuedEthereumHeader) -> Call { - let pallet_call = rialto_runtime::BridgeEthPoACall::import_unsigned_header { - header: Box::new(into_substrate_ethereum_header(header.header())), - receipts: into_substrate_ethereum_receipts(header.extra()), - }; - - rialto_runtime::Call::BridgeRialtoPoa(pallet_call) - } - - fn build_currency_exchange_call(&self, proof: Proof) -> Call { - let pallet_call = - rialto_runtime::BridgeCurrencyExchangeCall::import_peer_transaction { proof }; - rialto_runtime::Call::BridgeRialtoCurrencyExchange(pallet_call) - } -} - -/// Corresponds to the Kovan instance used in the bridge runtime. 
-#[derive(Default, Clone, Debug)] -pub struct Kovan; - -impl BridgeInstance for Kovan { - fn build_signed_header_call(&self, headers: Vec) -> Call { - let pallet_call = rialto_runtime::BridgeEthPoACall::import_signed_headers { - headers_with_receipts: headers - .into_iter() - .map(|header| { - ( - into_substrate_ethereum_header(header.header()), - into_substrate_ethereum_receipts(header.extra()), - ) - }) - .collect(), - }; - - rialto_runtime::Call::BridgeKovan(pallet_call) - } - - fn build_unsigned_header_call(&self, header: QueuedEthereumHeader) -> Call { - let pallet_call = rialto_runtime::BridgeEthPoACall::import_unsigned_header { - header: Box::new(into_substrate_ethereum_header(header.header())), - receipts: into_substrate_ethereum_receipts(header.extra()), - }; - - rialto_runtime::Call::BridgeKovan(pallet_call) - } - - fn build_currency_exchange_call(&self, proof: Proof) -> Call { - let pallet_call = - rialto_runtime::BridgeCurrencyExchangeCall::import_peer_transaction { proof }; - rialto_runtime::Call::BridgeKovanCurrencyExchange(pallet_call) - } -} diff --git a/bridges/relays/bin-ethereum/src/main.rs b/bridges/relays/bin-ethereum/src/main.rs deleted file mode 100644 index 99e1b48968d7..000000000000 --- a/bridges/relays/bin-ethereum/src/main.rs +++ /dev/null @@ -1,424 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
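The `instances` module above exists so one relay binary can target either the Rialto PoA or the Kovan pallet instance within the same runtime; `main.rs` below picks the instance from the `sub-pallet-instance` argument and passes it around as a trait object. A minimal, self-contained sketch of that dispatch follows (stand-in types only; the real methods return `rialto_runtime::Call` values built from queued headers and exchange proofs):

```rust
// Stand-in sketch of the instance dispatch; not the real relay types.
use std::sync::Arc;

trait BridgeInstance: Send + Sync {
    fn build_currency_exchange_call(&self, proof: Vec<u8>) -> String;
}

struct RialtoPoA;
struct Kovan;

impl BridgeInstance for RialtoPoA {
    fn build_currency_exchange_call(&self, _proof: Vec<u8>) -> String {
        "BridgeRialtoCurrencyExchange::import_peer_transaction".into()
    }
}

impl BridgeInstance for Kovan {
    fn build_currency_exchange_call(&self, _proof: Vec<u8>) -> String {
        "BridgeKovanCurrencyExchange::import_peer_transaction".into()
    }
}

fn instance_for(name: &str) -> Option<Arc<dyn BridgeInstance>> {
    match name.to_lowercase().as_str() {
        "rialto" => Some(Arc::new(RialtoPoA)),
        "kovan" => Some(Arc::new(Kovan)),
        _ => None,
    }
}

fn main() {
    let instance = instance_for("rialto").expect("known instance");
    let _call = instance.build_currency_exchange_call(Vec::new());
}
```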
- -#![recursion_limit = "1024"] - -mod error; -mod ethereum_client; -mod ethereum_deploy_contract; -mod ethereum_exchange; -mod ethereum_exchange_submit; -mod ethereum_sync_loop; -mod instances; -mod rialto_client; -mod rpc_errors; -mod substrate_sync_loop; -mod substrate_types; - -use anyhow::anyhow; -use ethereum_deploy_contract::EthereumDeployContractParams; -use ethereum_exchange::EthereumExchangeParams; -use ethereum_exchange_submit::EthereumExchangeSubmitParams; -use ethereum_sync_loop::EthereumSyncParams; -use headers_relay::sync::TargetTransactionMode; -use hex_literal::hex; -use instances::{BridgeInstance, Kovan, RialtoPoA}; -use libsecp256k1::SecretKey; -use relay_utils::{ - initialize::initialize_relay, - metrics::{MetricsAddress, MetricsParams}, -}; -use sp_core::crypto::Pair; -use substrate_sync_loop::SubstrateSyncParams; - -use headers_relay::sync::HeadersSyncParams; -use relay_ethereum_client::{ - ConnectionParams as EthereumConnectionParams, SigningParams as EthereumSigningParams, -}; -use relay_rialto_client::SigningParams as RialtoSigningParams; -use relay_substrate_client::ConnectionParams as SubstrateConnectionParams; -use std::sync::Arc; - -fn main() { - initialize_relay(); - - let yaml = clap::load_yaml!("cli.yml"); - let matches = clap::App::from_yaml(yaml).get_matches(); - async_std::task::block_on(run_command(&matches)); -} - -async fn run_command(matches: &clap::ArgMatches<'_>) { - match matches.subcommand() { - ("eth-to-sub", Some(eth_to_sub_matches)) => { - log::info!(target: "bridge", "Starting ETH âž¡ SUB relay."); - if ethereum_sync_loop::run(match ethereum_sync_params(eth_to_sub_matches) { - Ok(ethereum_sync_params) => ethereum_sync_params, - Err(err) => { - log::error!(target: "bridge", "Error parsing parameters: {}", err); - return - }, - }) - .await - .is_err() - { - log::error!(target: "bridge", "Unable to get Substrate genesis block for Ethereum sync."); - }; - }, - ("sub-to-eth", Some(sub_to_eth_matches)) => { - log::info!(target: "bridge", "Starting SUB âž¡ ETH relay."); - if substrate_sync_loop::run(match substrate_sync_params(sub_to_eth_matches) { - Ok(substrate_sync_params) => substrate_sync_params, - Err(err) => { - log::error!(target: "bridge", "Error parsing parameters: {}", err); - return - }, - }) - .await - .is_err() - { - log::error!(target: "bridge", "Unable to get Substrate genesis block for Substrate sync."); - }; - }, - ("eth-deploy-contract", Some(eth_deploy_matches)) => { - log::info!(target: "bridge", "Deploying ETH contracts."); - ethereum_deploy_contract::run( - match ethereum_deploy_contract_params(eth_deploy_matches) { - Ok(ethereum_deploy_params) => ethereum_deploy_params, - Err(err) => { - log::error!(target: "bridge", "Error during contract deployment: {}", err); - return - }, - }, - ) - .await; - }, - ("eth-submit-exchange-tx", Some(eth_exchange_submit_matches)) => { - log::info!(target: "bridge", "Submitting ETH âž¡ SUB exchange transaction."); - ethereum_exchange_submit::run( - match ethereum_exchange_submit_params(eth_exchange_submit_matches) { - Ok(eth_exchange_submit_params) => eth_exchange_submit_params, - Err(err) => { - log::error!(target: "bridge", "Error submitting Eethereum exchange transaction: {}", err); - return - }, - }, - ) - .await; - }, - ("eth-exchange-sub", Some(eth_exchange_matches)) => { - log::info!(target: "bridge", "Starting ETH âž¡ SUB exchange transactions relay."); - ethereum_exchange::run(match ethereum_exchange_params(eth_exchange_matches) { - Ok(eth_exchange_params) => eth_exchange_params, - 
Err(err) => { - log::error!(target: "bridge", "Error relaying Ethereum transactions proofs: {}", err); - return - }, - }) - .await; - }, - ("", _) => { - log::error!(target: "bridge", "No subcommand specified"); - }, - _ => unreachable!("all possible subcommands are checked above; qed"), - } -} - -fn ethereum_connection_params( - matches: &clap::ArgMatches, -) -> anyhow::Result { - let mut params = EthereumConnectionParams::default(); - if let Some(eth_host) = matches.value_of("eth-host") { - params.host = eth_host.into(); - } - if let Some(eth_port) = matches.value_of("eth-port") { - params.port = eth_port.parse().map_err(|e| anyhow!("Failed to parse eth-port: {}", e))?; - } - Ok(params) -} - -fn ethereum_signing_params(matches: &clap::ArgMatches) -> anyhow::Result { - let mut params = EthereumSigningParams::default(); - if let Some(eth_signer) = matches.value_of("eth-signer") { - params.signer = SecretKey::parse_slice( - &hex::decode(eth_signer).map_err(|e| anyhow!("Failed to parse eth-signer: {}", e))?, - ) - .map_err(|e| anyhow!("Invalid eth-signer: {}", e))?; - } - if let Some(eth_chain_id) = matches.value_of("eth-chain-id") { - params.chain_id = eth_chain_id - .parse::() - .map_err(|e| anyhow!("Failed to parse eth-chain-id: {}", e))?; - } - Ok(params) -} - -fn substrate_connection_params( - matches: &clap::ArgMatches, -) -> anyhow::Result { - let mut params = SubstrateConnectionParams::default(); - if let Some(sub_host) = matches.value_of("sub-host") { - params.host = sub_host.into(); - } - if let Some(sub_port) = matches.value_of("sub-port") { - params.port = sub_port.parse().map_err(|e| anyhow!("Failed to parse sub-port: {}", e))?; - } - Ok(params) -} - -fn rialto_signing_params(matches: &clap::ArgMatches) -> anyhow::Result { - let mut params = sp_keyring::AccountKeyring::Alice.pair(); - - if let Some(sub_signer) = matches.value_of("sub-signer") { - let sub_signer_password = matches.value_of("sub-signer-password"); - params = sp_core::sr25519::Pair::from_string(sub_signer, sub_signer_password) - .map_err(|e| anyhow!("Failed to parse sub-signer: {:?}", e))?; - } - Ok(params) -} - -fn ethereum_sync_params(matches: &clap::ArgMatches) -> anyhow::Result { - use crate::ethereum_sync_loop::consts::*; - - let mut sync_params = HeadersSyncParams { - max_future_headers_to_download: MAX_FUTURE_HEADERS_TO_DOWNLOAD, - max_headers_in_submitted_status: MAX_SUBMITTED_HEADERS, - max_headers_in_single_submit: MAX_HEADERS_IN_SINGLE_SUBMIT, - max_headers_size_in_single_submit: MAX_HEADERS_SIZE_IN_SINGLE_SUBMIT, - prune_depth: PRUNE_DEPTH, - target_tx_mode: TargetTransactionMode::Signed, - }; - - match matches.value_of("sub-tx-mode") { - Some("signed") => sync_params.target_tx_mode = TargetTransactionMode::Signed, - Some("unsigned") => { - sync_params.target_tx_mode = TargetTransactionMode::Unsigned; - - // tx pool won't accept too much unsigned transactions - sync_params.max_headers_in_submitted_status = 10; - }, - Some("backup") => sync_params.target_tx_mode = TargetTransactionMode::Backup, - Some(mode) => return Err(anyhow!("Invalid sub-tx-mode: {}", mode)), - None => sync_params.target_tx_mode = TargetTransactionMode::Signed, - } - - let params = EthereumSyncParams { - eth_params: ethereum_connection_params(matches)?, - sub_params: substrate_connection_params(matches)?, - sub_sign: rialto_signing_params(matches)?, - metrics_params: metrics_params(matches)?, - instance: instance_params(matches)?, - sync_params, - }; - - log::debug!(target: "bridge", "Ethereum sync params: {:?}", params); - - 
Ok(params) -} - -fn substrate_sync_params(matches: &clap::ArgMatches) -> anyhow::Result { - use crate::substrate_sync_loop::consts::*; - - let eth_contract_address: relay_ethereum_client::types::Address = - if let Some(eth_contract) = matches.value_of("eth-contract") { - eth_contract.parse()? - } else { - "731a10897d267e19b34503ad902d0a29173ba4b1" - .parse() - .expect("address is hardcoded, thus valid; qed") - }; - - let params = SubstrateSyncParams { - sub_params: substrate_connection_params(matches)?, - eth_params: ethereum_connection_params(matches)?, - eth_sign: ethereum_signing_params(matches)?, - metrics_params: metrics_params(matches)?, - sync_params: HeadersSyncParams { - max_future_headers_to_download: MAX_FUTURE_HEADERS_TO_DOWNLOAD, - max_headers_in_submitted_status: MAX_SUBMITTED_HEADERS, - max_headers_in_single_submit: MAX_SUBMITTED_HEADERS, - max_headers_size_in_single_submit: std::usize::MAX, - prune_depth: PRUNE_DEPTH, - target_tx_mode: TargetTransactionMode::Signed, - }, - eth_contract_address, - }; - - log::debug!(target: "bridge", "Substrate sync params: {:?}", params); - - Ok(params) -} - -fn ethereum_deploy_contract_params( - matches: &clap::ArgMatches, -) -> anyhow::Result { - let eth_contract_code = - parse_hex_argument(matches, "eth-contract-code")?.unwrap_or_else(|| { - hex::decode(include_str!("../res/substrate-bridge-bytecode.hex")) - .expect("code is hardcoded, thus valid; qed") - }); - let sub_initial_authorities_set_id = matches - .value_of("sub-authorities-set-id") - .map(|set| { - set.parse() - .map_err(|e| anyhow!("Failed to parse sub-authorities-set-id: {}", e)) - }) - .transpose()?; - let sub_initial_authorities_set = parse_hex_argument(matches, "sub-authorities-set")?; - let sub_initial_header = parse_hex_argument(matches, "sub-initial-header")?; - - let params = EthereumDeployContractParams { - eth_params: ethereum_connection_params(matches)?, - eth_sign: ethereum_signing_params(matches)?, - sub_params: substrate_connection_params(matches)?, - sub_initial_authorities_set_id, - sub_initial_authorities_set, - sub_initial_header, - eth_contract_code, - }; - - log::debug!(target: "bridge", "Deploy params: {:?}", params); - - Ok(params) -} - -fn ethereum_exchange_submit_params( - matches: &clap::ArgMatches, -) -> anyhow::Result { - let eth_nonce = matches - .value_of("eth-nonce") - .map(|eth_nonce| { - relay_ethereum_client::types::U256::from_dec_str(eth_nonce) - .map_err(|e| anyhow!("Failed to parse eth-nonce: {}", e)) - }) - .transpose()?; - - let eth_amount = matches - .value_of("eth-amount") - .map(|eth_amount| { - eth_amount.parse().map_err(|e| anyhow!("Failed to parse eth-amount: {}", e)) - }) - .transpose()? - .unwrap_or_else(|| { - // This is in Wei, represents 1 ETH - 1_000_000_000_000_000_000_u64.into() - }); - - // This is the well-known Substrate account of Ferdie - let default_recepient = - hex!("1cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c"); - - let sub_recipient = if let Some(sub_recipient) = matches.value_of("sub-recipient") { - hex::decode(&sub_recipient) - .map_err(|err| err.to_string()) - .and_then(|vsub_recipient| { - let expected_len = default_recepient.len(); - if expected_len != vsub_recipient.len() { - Err(format!("invalid length. Expected {} bytes", expected_len)) - } else { - let mut sub_recipient = default_recepient; - sub_recipient.copy_from_slice(&vsub_recipient[..expected_len]); - Ok(sub_recipient) - } - }) - .map_err(|e| anyhow!("Failed to parse sub-recipient: {}", e))? 
- } else { - default_recepient - }; - - let params = EthereumExchangeSubmitParams { - eth_params: ethereum_connection_params(matches)?, - eth_sign: ethereum_signing_params(matches)?, - eth_nonce, - eth_amount, - sub_recipient, - }; - - log::debug!(target: "bridge", "Submit Ethereum exchange tx params: {:?}", params); - - Ok(params) -} - -fn ethereum_exchange_params(matches: &clap::ArgMatches) -> anyhow::Result { - let mode = match matches.value_of("eth-tx-hash") { - Some(eth_tx_hash) => ethereum_exchange::ExchangeRelayMode::Single( - eth_tx_hash.parse().map_err(|e| anyhow!("Failed to parse eth-tx-hash: {}", e))?, - ), - None => ethereum_exchange::ExchangeRelayMode::Auto( - matches - .value_of("eth-start-with-block") - .map(|eth_start_with_block| { - eth_start_with_block - .parse() - .map_err(|e| anyhow!("Failed to parse eth-start-with-block: {}", e)) - }) - .transpose()?, - ), - }; - - let params = EthereumExchangeParams { - eth_params: ethereum_connection_params(matches)?, - sub_params: substrate_connection_params(matches)?, - sub_sign: rialto_signing_params(matches)?, - metrics_params: metrics_params(matches)?, - instance: instance_params(matches)?, - mode, - }; - - log::debug!(target: "bridge", "Ethereum exchange params: {:?}", params); - - Ok(params) -} - -fn metrics_params(matches: &clap::ArgMatches) -> anyhow::Result { - if matches.is_present("no-prometheus") { - return Ok(None.into()) - } - - let mut metrics_params = MetricsAddress::default(); - - if let Some(prometheus_host) = matches.value_of("prometheus-host") { - metrics_params.host = prometheus_host.into(); - } - if let Some(prometheus_port) = matches.value_of("prometheus-port") { - metrics_params.port = prometheus_port - .parse() - .map_err(|e| anyhow!("Failed to parse prometheus-port: {}", e))?; - } - - Ok(Some(metrics_params).into()) -} - -fn instance_params(matches: &clap::ArgMatches) -> anyhow::Result> { - let instance = if let Some(instance) = matches.value_of("sub-pallet-instance") { - match instance.to_lowercase().as_str() { - "rialto" => Arc::new(RialtoPoA) as Arc, - "kovan" => Arc::new(Kovan), - _ => return Err(anyhow!("Unsupported bridge pallet instance")), - } - } else { - unreachable!("CLI config enforces a default instance, can never be None") - }; - - Ok(instance) -} - -fn parse_hex_argument(matches: &clap::ArgMatches, arg: &str) -> anyhow::Result>> { - match matches.value_of(arg) { - Some(value) => - Ok(Some(hex::decode(value).map_err(|e| anyhow!("Failed to parse {}: {}", arg, e))?)), - None => Ok(None), - } -} diff --git a/bridges/relays/bin-ethereum/src/rialto_client.rs b/bridges/relays/bin-ethereum/src/rialto_client.rs deleted file mode 100644 index 1dadf9f7ddff..000000000000 --- a/bridges/relays/bin-ethereum/src/rialto_client.rs +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
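The deleted `rialto_client` module below wraps the generic Substrate client with bridge-specific helpers: arguments are SCALE-encoded, the named runtime API method is invoked through `state_call`, and the raw response bytes are decoded back into Rust types. A minimal sketch of that round trip, using a stand-in client trait instead of the real `relay_substrate_client` types and with `H256` reduced to `[u8; 32]`:

```rust
// Sketch of the state_call round trip used below; `StateCall` is a stand-in
// for the real Substrate RPC client.
use codec::Decode; // parity-scale-codec

#[async_trait::async_trait]
trait StateCall {
    async fn state_call(&self, method: &str, data: Vec<u8>) -> Result<Vec<u8>, String>;
}

/// Ask the runtime which Ethereum block it currently considers best.
async fn best_ethereum_block(client: &dyn StateCall) -> Result<(u64, [u8; 32]), String> {
    let raw = client
        .state_call("RialtoPoAHeaderApi_best_block", Vec::new())
        .await?;
    <(u64, [u8; 32])>::decode(&mut &raw[..]).map_err(|e| format!("decode failed: {}", e))
}
```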
- -use crate::{ - ethereum_sync_loop::QueuedEthereumHeader, instances::BridgeInstance, rpc_errors::RpcError, -}; - -use async_trait::async_trait; -use bp_eth_poa::AuraHeader as SubstrateEthereumHeader; -use codec::{Decode, Encode}; -use headers_relay::sync_types::SubmittedHeaders; -use relay_ethereum_client::types::HeaderId as EthereumHeaderId; -use relay_rialto_client::{Rialto, SigningParams as RialtoSigningParams}; -use relay_substrate_client::{ - Client as SubstrateClient, TransactionSignScheme, UnsignedTransaction, -}; -use relay_utils::HeaderId; -use sp_core::{crypto::Pair, Bytes}; -use std::{collections::VecDeque, sync::Arc}; - -const ETH_API_IMPORT_REQUIRES_RECEIPTS: &str = "RialtoPoAHeaderApi_is_import_requires_receipts"; -const ETH_API_IS_KNOWN_BLOCK: &str = "RialtoPoAHeaderApi_is_known_block"; -const ETH_API_BEST_BLOCK: &str = "RialtoPoAHeaderApi_best_block"; -const ETH_API_BEST_FINALIZED_BLOCK: &str = "RialtoPoAHeaderApi_finalized_block"; -const EXCH_API_FILTER_TRANSACTION_PROOF: &str = - "RialtoCurrencyExchangeApi_filter_transaction_proof"; - -type RpcResult = std::result::Result; - -/// A trait which contains methods that work by using multiple low-level RPCs, or more complicated -/// interactions involving, for example, an Ethereum bridge module. -#[async_trait] -pub trait SubstrateHighLevelRpc { - /// Returns the best Ethereum block that Substrate runtime knows of. - async fn best_ethereum_block(&self) -> RpcResult; - /// Returns best finalized Ethereum block that Substrate runtime knows of. - async fn best_ethereum_finalized_block(&self) -> RpcResult; - /// Returns whether transactions receipts are required for Ethereum header submission. - async fn ethereum_receipts_required(&self, header: SubstrateEthereumHeader) -> RpcResult; - /// Returns whether the given Ethereum header is known to the Substrate runtime. - async fn ethereum_header_known(&self, header_id: EthereumHeaderId) -> RpcResult; -} - -#[async_trait] -impl SubstrateHighLevelRpc for SubstrateClient { - async fn best_ethereum_block(&self) -> RpcResult { - let call = ETH_API_BEST_BLOCK.to_string(); - let data = Bytes(Vec::new()); - - let encoded_response = self.state_call(call, data, None).await?; - let decoded_response: (u64, bp_eth_poa::H256) = - Decode::decode(&mut &encoded_response.0[..])?; - - let best_header_id = HeaderId(decoded_response.0, decoded_response.1); - Ok(best_header_id) - } - - async fn best_ethereum_finalized_block(&self) -> RpcResult { - let call = ETH_API_BEST_FINALIZED_BLOCK.to_string(); - let data = Bytes(Vec::new()); - - let encoded_response = self.state_call(call, data, None).await?; - let decoded_response: (u64, bp_eth_poa::H256) = - Decode::decode(&mut &encoded_response.0[..])?; - - let best_header_id = HeaderId(decoded_response.0, decoded_response.1); - Ok(best_header_id) - } - - async fn ethereum_receipts_required(&self, header: SubstrateEthereumHeader) -> RpcResult { - let call = ETH_API_IMPORT_REQUIRES_RECEIPTS.to_string(); - let data = Bytes(header.encode()); - - let encoded_response = self.state_call(call, data, None).await?; - let receipts_required: bool = Decode::decode(&mut &encoded_response.0[..])?; - - Ok(receipts_required) - } - - // The Substrate module could prune old headers. So this function could return false even - // if header is synced. And we'll mark corresponding Ethereum header as Orphan. - // - // But when we read the best header from Substrate next time, we will know that - // there's a better header. 
This Orphan will either be marked as synced, or - // eventually pruned. - async fn ethereum_header_known(&self, header_id: EthereumHeaderId) -> RpcResult { - let call = ETH_API_IS_KNOWN_BLOCK.to_string(); - let data = Bytes(header_id.1.encode()); - - let encoded_response = self.state_call(call, data, None).await?; - let is_known_block: bool = Decode::decode(&mut &encoded_response.0[..])?; - - Ok(is_known_block) - } -} - -/// A trait for RPC calls which are used to submit Ethereum headers to a Substrate -/// runtime. These are typically calls which use a combination of other low-level RPC -/// calls. -#[async_trait] -pub trait SubmitEthereumHeaders { - /// Submits Ethereum header to Substrate runtime. - async fn submit_ethereum_headers( - &self, - params: RialtoSigningParams, - instance: Arc, - headers: Vec, - sign_transactions: bool, - ) -> SubmittedHeaders; - - /// Submits signed Ethereum header to Substrate runtime. - async fn submit_signed_ethereum_headers( - &self, - params: RialtoSigningParams, - instance: Arc, - headers: Vec, - ) -> SubmittedHeaders; - - /// Submits unsigned Ethereum header to Substrate runtime. - async fn submit_unsigned_ethereum_headers( - &self, - instance: Arc, - headers: Vec, - ) -> SubmittedHeaders; -} - -#[async_trait] -impl SubmitEthereumHeaders for SubstrateClient { - async fn submit_ethereum_headers( - &self, - params: RialtoSigningParams, - instance: Arc, - headers: Vec, - sign_transactions: bool, - ) -> SubmittedHeaders { - if sign_transactions { - self.submit_signed_ethereum_headers(params, instance, headers).await - } else { - self.submit_unsigned_ethereum_headers(instance, headers).await - } - } - - async fn submit_signed_ethereum_headers( - &self, - params: RialtoSigningParams, - instance: Arc, - headers: Vec, - ) -> SubmittedHeaders { - let ids = headers.iter().map(|header| header.id()).collect(); - let genesis_hash = *self.genesis_hash(); - let submission_result = async { - self.submit_signed_extrinsic( - (*params.public().as_array_ref()).into(), - move |_, transaction_nonce| { - Bytes( - Rialto::sign_transaction( - genesis_hash, - ¶ms, - relay_substrate_client::TransactionEra::immortal(), - UnsignedTransaction::new( - instance.build_signed_header_call(headers), - transaction_nonce, - ), - ) - .encode(), - ) - }, - ) - .await?; - Ok(()) - } - .await; - - match submission_result { - Ok(_) => SubmittedHeaders { - submitted: ids, - incomplete: Vec::new(), - rejected: Vec::new(), - fatal_error: None, - }, - Err(error) => SubmittedHeaders { - submitted: Vec::new(), - incomplete: Vec::new(), - rejected: ids, - fatal_error: Some(error), - }, - } - } - - async fn submit_unsigned_ethereum_headers( - &self, - instance: Arc, - headers: Vec, - ) -> SubmittedHeaders { - let mut ids = headers.iter().map(|header| header.id()).collect::>(); - let mut submitted_headers = SubmittedHeaders::default(); - - for header in headers { - let id = ids.pop_front().expect("both collections have same size; qed"); - - let call = instance.build_unsigned_header_call(header); - let transaction = create_unsigned_submit_transaction(call); - - match self.submit_unsigned_extrinsic(Bytes(transaction.encode())).await { - Ok(_) => submitted_headers.submitted.push(id), - Err(error) => { - submitted_headers.rejected.push(id); - submitted_headers.rejected.extend(ids); - submitted_headers.fatal_error = Some(error.into()); - break - }, - } - } - - submitted_headers - } -} - -/// A trait for RPC calls which are used to submit proof of Ethereum exchange transaction to a -/// Substrate runtime. 
These are typically calls which use a combination of other low-level RPC -/// calls. -#[async_trait] -pub trait SubmitEthereumExchangeTransactionProof { - /// Pre-verify Ethereum exchange transaction proof. - async fn verify_exchange_transaction_proof( - &self, - proof: rialto_runtime::exchange::EthereumTransactionInclusionProof, - ) -> RpcResult; - /// Submits Ethereum exchange transaction proof to Substrate runtime. - async fn submit_exchange_transaction_proof( - &self, - params: RialtoSigningParams, - instance: Arc, - proof: rialto_runtime::exchange::EthereumTransactionInclusionProof, - ) -> RpcResult<()>; -} - -#[async_trait] -impl SubmitEthereumExchangeTransactionProof for SubstrateClient { - async fn verify_exchange_transaction_proof( - &self, - proof: rialto_runtime::exchange::EthereumTransactionInclusionProof, - ) -> RpcResult { - let call = EXCH_API_FILTER_TRANSACTION_PROOF.to_string(); - let data = Bytes(proof.encode()); - - let encoded_response = self.state_call(call, data, None).await?; - let is_allowed: bool = Decode::decode(&mut &encoded_response.0[..])?; - - Ok(is_allowed) - } - - async fn submit_exchange_transaction_proof( - &self, - params: RialtoSigningParams, - instance: Arc, - proof: rialto_runtime::exchange::EthereumTransactionInclusionProof, - ) -> RpcResult<()> { - let genesis_hash = *self.genesis_hash(); - self.submit_signed_extrinsic( - (*params.public().as_array_ref()).into(), - move |_, transaction_nonce| { - Bytes( - Rialto::sign_transaction( - genesis_hash, - ¶ms, - relay_substrate_client::TransactionEra::immortal(), - UnsignedTransaction::new( - instance.build_currency_exchange_call(proof), - transaction_nonce, - ), - ) - .encode(), - ) - }, - ) - .await?; - Ok(()) - } -} - -/// Create unsigned Substrate transaction for submitting Ethereum header. -fn create_unsigned_submit_transaction( - call: rialto_runtime::Call, -) -> rialto_runtime::UncheckedExtrinsic { - rialto_runtime::UncheckedExtrinsic::new_unsigned(call) -} diff --git a/bridges/relays/bin-ethereum/src/rpc_errors.rs b/bridges/relays/bin-ethereum/src/rpc_errors.rs deleted file mode 100644 index e91bc363839b..000000000000 --- a/bridges/relays/bin-ethereum/src/rpc_errors.rs +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use relay_ethereum_client::Error as EthereumNodeError; -use relay_substrate_client::Error as SubstrateNodeError; -use relay_utils::MaybeConnectionError; -use thiserror::Error; - -/// Contains common errors that can occur when -/// interacting with a Substrate or Ethereum node -/// through RPC. -#[derive(Debug, Error)] -pub enum RpcError { - /// The arguments to the RPC method failed to serialize. - #[error("RPC arguments serialization failed: {0}")] - Serialization(#[from] serde_json::Error), - /// An error occurred when interacting with an Ethereum node. 
- #[error("Ethereum node error: {0}")] - Ethereum(#[from] EthereumNodeError), - /// An error occurred when interacting with a Substrate node. - #[error("Substrate node error: {0}")] - Substrate(#[from] SubstrateNodeError), - /// Error running relay loop. - #[error("{0}")] - SyncLoop(String), -} - -impl From for String { - fn from(err: RpcError) -> Self { - format!("{}", err) - } -} - -impl From for RpcError { - fn from(err: ethabi::Error) -> Self { - Self::Ethereum(EthereumNodeError::ResponseParseFailed(format!("{}", err))) - } -} - -impl MaybeConnectionError for RpcError { - fn is_connection_error(&self) -> bool { - match self { - RpcError::Ethereum(ref error) => error.is_connection_error(), - RpcError::Substrate(ref error) => error.is_connection_error(), - _ => false, - } - } -} - -impl From for RpcError { - fn from(err: codec::Error) -> Self { - Self::Substrate(SubstrateNodeError::ResponseParseFailed(err)) - } -} diff --git a/bridges/relays/bin-ethereum/src/substrate_sync_loop.rs b/bridges/relays/bin-ethereum/src/substrate_sync_loop.rs deleted file mode 100644 index 4b5bd4fa7326..000000000000 --- a/bridges/relays/bin-ethereum/src/substrate_sync_loop.rs +++ /dev/null @@ -1,204 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Rialto-Substrate -> Ethereum PoA synchronization. - -use crate::{ethereum_client::EthereumHighLevelRpc, rpc_errors::RpcError}; - -use async_trait::async_trait; -use codec::Encode; -use headers_relay::{ - sync::HeadersSyncParams, - sync_loop::TargetClient, - sync_types::{HeadersSyncPipeline, QueuedHeader, SourceHeader, SubmittedHeaders}, -}; -use relay_ethereum_client::{ - types::Address, Client as EthereumClient, ConnectionParams as EthereumConnectionParams, - SigningParams as EthereumSigningParams, -}; -use relay_rialto_client::{HeaderId as RialtoHeaderId, Rialto, SyncHeader as RialtoSyncHeader}; -use relay_substrate_client::{ - headers_source::HeadersSource, Chain as SubstrateChain, Client as SubstrateClient, - ConnectionParams as SubstrateConnectionParams, -}; -use relay_utils::{metrics::MetricsParams, relay_loop::Client as RelayClient}; -use sp_runtime::EncodedJustification; - -use std::{collections::HashSet, fmt::Debug, time::Duration}; - -pub mod consts { - use super::*; - - /// Interval at which we check new Ethereum blocks. - pub const ETHEREUM_TICK_INTERVAL: Duration = Duration::from_secs(5); - /// Max Ethereum headers we want to have in all 'before-submitted' states. - pub const MAX_FUTURE_HEADERS_TO_DOWNLOAD: usize = 8; - /// Max Ethereum headers count we want to have in 'submitted' state. - pub const MAX_SUBMITTED_HEADERS: usize = 4; - /// Max depth of in-memory headers in all states. Past this depth they will be forgotten - /// (pruned). - pub const PRUNE_DEPTH: u32 = 256; -} - -/// Substrate synchronization parameters. 
-#[derive(Debug)] -pub struct SubstrateSyncParams { - /// Substrate connection params. - pub sub_params: SubstrateConnectionParams, - /// Ethereum connection params. - pub eth_params: EthereumConnectionParams, - /// Ethereum signing params. - pub eth_sign: EthereumSigningParams, - /// Ethereum bridge contract address. - pub eth_contract_address: Address, - /// Synchronization parameters. - pub sync_params: HeadersSyncParams, - /// Metrics parameters. - pub metrics_params: MetricsParams, -} - -/// Substrate synchronization pipeline. -#[derive(Clone, Copy, Debug)] -#[cfg_attr(test, derive(PartialEq))] -pub struct SubstrateHeadersSyncPipeline; - -impl HeadersSyncPipeline for SubstrateHeadersSyncPipeline { - const SOURCE_NAME: &'static str = "Substrate"; - const TARGET_NAME: &'static str = "Ethereum"; - - type Hash = rialto_runtime::Hash; - type Number = rialto_runtime::BlockNumber; - type Header = RialtoSyncHeader; - type Extra = (); - type Completion = EncodedJustification; - - fn estimate_size(source: &QueuedHeader) -> usize { - source.header().encode().len() - } -} - -/// Queued substrate header ID. -pub type QueuedRialtoHeader = QueuedHeader; - -/// Rialto node as headers source. -type SubstrateHeadersSource = HeadersSource; - -/// Ethereum client as Substrate headers target. -#[derive(Clone)] -struct EthereumHeadersTarget { - /// Ethereum node client. - client: EthereumClient, - /// Bridge contract address. - contract: Address, - /// Ethereum signing params. - sign_params: EthereumSigningParams, -} - -impl EthereumHeadersTarget { - fn new(client: EthereumClient, contract: Address, sign_params: EthereumSigningParams) -> Self { - Self { client, contract, sign_params } - } -} - -#[async_trait] -impl RelayClient for EthereumHeadersTarget { - type Error = RpcError; - - async fn reconnect(&mut self) -> Result<(), RpcError> { - self.client.reconnect().await.map_err(Into::into) - } -} - -#[async_trait] -impl TargetClient for EthereumHeadersTarget { - async fn best_header_id(&self) -> Result { - // we can't continue to relay headers if Ethereum node is out of sync, because - // it may have already received (some of) headers that we're going to relay - self.client.ensure_synced().await?; - - self.client.best_substrate_block(self.contract).await - } - - async fn is_known_header( - &self, - id: RialtoHeaderId, - ) -> Result<(RialtoHeaderId, bool), RpcError> { - self.client.substrate_header_known(self.contract, id).await - } - - async fn submit_headers( - &self, - headers: Vec, - ) -> SubmittedHeaders { - self.client - .submit_substrate_headers(self.sign_params.clone(), self.contract, headers) - .await - } - - async fn incomplete_headers_ids(&self) -> Result, RpcError> { - self.client.incomplete_substrate_headers(self.contract).await - } - - async fn complete_header( - &self, - id: RialtoHeaderId, - completion: EncodedJustification, - ) -> Result { - self.client - .complete_substrate_header(self.sign_params.clone(), self.contract, id, completion) - .await - } - - async fn requires_extra( - &self, - header: QueuedRialtoHeader, - ) -> Result<(RialtoHeaderId, bool), RpcError> { - Ok((header.header().id(), false)) - } -} - -/// Run Substrate headers synchronization. 
-pub async fn run(params: SubstrateSyncParams) -> Result<(), RpcError> { - let SubstrateSyncParams { - sub_params, - eth_params, - eth_sign, - eth_contract_address, - sync_params, - metrics_params, - } = params; - - let eth_client = EthereumClient::new(eth_params).await; - let sub_client = SubstrateClient::::new(sub_params).await; - - let target = EthereumHeadersTarget::new(eth_client, eth_contract_address, eth_sign); - let source = SubstrateHeadersSource::new(sub_client); - - headers_relay::sync_loop::run( - source, - Rialto::AVERAGE_BLOCK_INTERVAL, - target, - consts::ETHEREUM_TICK_INTERVAL, - (), - sync_params, - metrics_params, - futures::future::pending(), - ) - .await - .map_err(|e| RpcError::SyncLoop(e.to_string()))?; - - Ok(()) -} diff --git a/bridges/relays/bin-ethereum/src/substrate_types.rs b/bridges/relays/bin-ethereum/src/substrate_types.rs deleted file mode 100644 index f9e6c29c6a65..000000000000 --- a/bridges/relays/bin-ethereum/src/substrate_types.rs +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2020-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Converting between Ethereum headers and bridge module types. - -use bp_eth_poa::{ - AuraHeader as SubstrateEthereumHeader, LogEntry as SubstrateEthereumLogEntry, - Receipt as SubstrateEthereumReceipt, TransactionOutcome as SubstrateEthereumTransactionOutcome, -}; -use relay_ethereum_client::types::{ - Header as EthereumHeader, Receipt as EthereumReceipt, - HEADER_ID_PROOF as ETHEREUM_HEADER_ID_PROOF, -}; - -/// Convert Ethereum header into Ethereum header for Substrate. -pub fn into_substrate_ethereum_header(header: &EthereumHeader) -> SubstrateEthereumHeader { - SubstrateEthereumHeader { - parent_hash: header.parent_hash, - timestamp: header.timestamp.as_u64(), - number: header.number.expect(ETHEREUM_HEADER_ID_PROOF).as_u64(), - author: header.author, - transactions_root: header.transactions_root, - uncles_hash: header.uncles_hash, - extra_data: header.extra_data.0.clone(), - state_root: header.state_root, - receipts_root: header.receipts_root, - log_bloom: header.logs_bloom.unwrap_or_default().data().into(), - gas_used: header.gas_used, - gas_limit: header.gas_limit, - difficulty: header.difficulty, - seal: header.seal_fields.iter().map(|s| s.0.clone()).collect(), - } -} - -/// Convert Ethereum transactions receipts into Ethereum transactions receipts for Substrate. -pub fn into_substrate_ethereum_receipts( - receipts: &Option>, -) -> Option> { - receipts - .as_ref() - .map(|receipts| receipts.iter().map(into_substrate_ethereum_receipt).collect()) -} - -/// Convert Ethereum transactions receipt into Ethereum transactions receipt for Substrate. 
-pub fn into_substrate_ethereum_receipt(receipt: &EthereumReceipt) -> SubstrateEthereumReceipt { - SubstrateEthereumReceipt { - gas_used: receipt.cumulative_gas_used, - log_bloom: receipt.logs_bloom.data().into(), - logs: receipt - .logs - .iter() - .map(|log_entry| SubstrateEthereumLogEntry { - address: log_entry.address, - topics: log_entry.topics.clone(), - data: log_entry.data.0.clone(), - }) - .collect(), - outcome: match (receipt.status, receipt.root) { - (Some(status), None) => - SubstrateEthereumTransactionOutcome::StatusCode(status.as_u64() as u8), - (None, Some(root)) => SubstrateEthereumTransactionOutcome::StateRoot(root), - _ => SubstrateEthereumTransactionOutcome::Unknown, - }, - } -} diff --git a/bridges/relays/bin-substrate/src/cli/encode_call.rs b/bridges/relays/bin-substrate/src/cli/encode_call.rs index f496f78b29d2..ca0e6dd8abff 100644 --- a/bridges/relays/bin-substrate/src/cli/encode_call.rs +++ b/bridges/relays/bin-substrate/src/cli/encode_call.rs @@ -345,7 +345,7 @@ mod tests { // then assert!(format!("{:?}", call_hex).starts_with( - "0x10030000000001000000381409000000000001d43593c715fdd31c61141abd04a99fd6822c8558854cc\ + "0x0c030000000001000000381409000000000001d43593c715fdd31c61141abd04a99fd6822c8558854cc\ de39a5684e7a56da27d01d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d01" )) } diff --git a/bridges/relays/client-ethereum/Cargo.toml b/bridges/relays/client-ethereum/Cargo.toml deleted file mode 100644 index 171988a32533..000000000000 --- a/bridges/relays/client-ethereum/Cargo.toml +++ /dev/null @@ -1,20 +0,0 @@ -[package] -name = "relay-ethereum-client" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -async-std = "1.6.5" -bp-eth-poa = { path = "../../primitives/ethereum-poa" } -headers-relay = { path = "../headers" } -hex-literal = "0.3" -jsonrpsee-proc-macros = "0.3.1" -jsonrpsee-ws-client = "0.3.1" -libsecp256k1 = { version = "0.7", default-features = false, features = ["hmac"] } -log = "0.4.11" -relay-utils = { path = "../utils" } -tokio = "1.8" -web3 = { git = "https://github.com/svyatonik/rust-web3.git", branch = "bump-deps" } -thiserror = "1.0.26" diff --git a/bridges/relays/client-ethereum/src/client.rs b/bridges/relays/client-ethereum/src/client.rs deleted file mode 100644 index 48b7c9386f35..000000000000 --- a/bridges/relays/client-ethereum/src/client.rs +++ /dev/null @@ -1,265 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
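In the `encode_call` test hunk above, only the first byte of the expected hex changes (`0x10` becomes `0x0c`). With SCALE, the first byte of an encoded outer `Call` is the index of the pallet's variant in the runtime call enum, so dropping pallets declared ahead of the bridge messages pallet shifts that index from 16 to 12 while the inner call bytes stay identical. A toy demonstration with `parity-scale-codec` (illustrative enum, not the Rialto runtime):

```rust
// Toy example: the first encoded byte of an outer call enum is the variant
// (pallet) index, so re-indexing pallets changes that byte and nothing else.
use codec::Encode; // parity-scale-codec with the "derive" feature

#[derive(Encode)]
enum Call {
    #[codec(index = 12)]
    BridgeMessages(u8), // stand-in payload for the inner pallet call
    #[codec(index = 16)]
    SomeOtherPallet(u8),
}

fn main() {
    assert_eq!(Call::BridgeMessages(3).encode(), vec![0x0c, 0x03]);
    assert_eq!(Call::SomeOtherPallet(3).encode(), vec![0x10, 0x03]);
}
```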
-
-use crate::{
-	rpc::Ethereum,
-	types::{
-		Address, Bytes, CallRequest, Header, HeaderWithTransactions, Receipt, SignedRawTx,
-		SyncState, Transaction, TransactionHash, H256, U256,
-	},
-	ConnectionParams, Error, Result,
-};
-
-use jsonrpsee_ws_client::{WsClient as RpcClient, WsClientBuilder as RpcClientBuilder};
-use relay_utils::relay_loop::RECONNECT_DELAY;
-use std::{future::Future, sync::Arc};
-
-/// Number of headers missing from the Ethereum node for us to consider node not synced.
-const MAJOR_SYNC_BLOCKS: u64 = 5;
-
-/// The client used to interact with an Ethereum node through RPC.
-#[derive(Clone)]
-pub struct Client {
-	tokio: Arc<tokio::runtime::Runtime>,
-	params: ConnectionParams,
-	client: Arc<RpcClient>,
-}
-
-impl Client {
-	/// Create a new Ethereum RPC Client.
-	///
-	/// This function will keep connecting to given Ethereum node until connection is established
-	/// and is functional. If attempt fail, it will wait for `RECONNECT_DELAY` and retry again.
-	pub async fn new(params: ConnectionParams) -> Self {
-		loop {
-			match Self::try_connect(params.clone()).await {
-				Ok(client) => return client,
-				Err(error) => log::error!(
-					target: "bridge",
-					"Failed to connect to Ethereum node: {:?}. Going to retry in {}s",
-					error,
-					RECONNECT_DELAY.as_secs(),
-				),
-			}
-
-			async_std::task::sleep(RECONNECT_DELAY).await;
-		}
-	}
-
-	/// Try to connect to Ethereum node. Returns Ethereum RPC client if connection has been
-	/// established or error otherwise.
-	pub async fn try_connect(params: ConnectionParams) -> Result<Self> {
-		let (tokio, client) = Self::build_client(&params).await?;
-		Ok(Self { tokio, client, params })
-	}
-
-	/// Build client to use in connection.
-	async fn build_client(
-		params: &ConnectionParams,
-	) -> Result<(Arc<tokio::runtime::Runtime>, Arc<RpcClient>)> {
-		let tokio = tokio::runtime::Runtime::new()?;
-		let uri = format!("ws://{}:{}", params.host, params.port);
-		let client = tokio
-			.spawn(async move { RpcClientBuilder::default().build(&uri).await })
-			.await??;
-		Ok((Arc::new(tokio), Arc::new(client)))
-	}
-
-	/// Reopen client connection.
-	pub async fn reconnect(&mut self) -> Result<()> {
-		let (tokio, client) = Self::build_client(&self.params).await?;
-		self.tokio = tokio;
-		self.client = client;
-		Ok(())
-	}
-}
-
-impl Client {
-	/// Returns true if client is connected to at least one peer and is in synced state.
-	pub async fn ensure_synced(&self) -> Result<()> {
-		self.jsonrpsee_execute(move |client| async move {
-			match Ethereum::syncing(&*client).await? {
-				SyncState::NotSyncing => Ok(()),
-				SyncState::Syncing(syncing) => {
-					let missing_headers =
-						syncing.highest_block.saturating_sub(syncing.current_block);
-					if missing_headers > MAJOR_SYNC_BLOCKS.into() {
-						return Err(Error::ClientNotSynced(missing_headers))
-					}
-
-					Ok(())
-				},
-			}
-		})
-		.await
-	}
-
-	/// Estimate gas usage for the given call.
-	pub async fn estimate_gas(&self, call_request: CallRequest) -> Result<U256> {
-		self.jsonrpsee_execute(move |client| async move {
-			Ok(Ethereum::estimate_gas(&*client, call_request).await?)
-		})
-		.await
-	}
-
-	/// Retrieve number of the best known block from the Ethereum node.
-	pub async fn best_block_number(&self) -> Result<u64> {
-		self.jsonrpsee_execute(move |client| async move {
-			Ok(Ethereum::block_number(&*client).await?.as_u64())
-		})
-		.await
-	}
-
-	/// Retrieve number of the best known block from the Ethereum node.
-	pub async fn header_by_number(&self, block_number: u64) -> Result<Header> {
-		self.jsonrpsee_execute(move |client| async move {
-			let get_full_tx_objects = false;
-			let header =
-				Ethereum::get_block_by_number(&*client, block_number, get_full_tx_objects).await?;
-			match header.number.is_some() && header.hash.is_some() && header.logs_bloom.is_some() {
-				true => Ok(header),
-				false => Err(Error::IncompleteHeader),
-			}
-		})
-		.await
-	}
-
-	/// Retrieve block header by its hash from Ethereum node.
-	pub async fn header_by_hash(&self, hash: H256) -> Result<Header> {
-		self.jsonrpsee_execute(move |client| async move {
-			let get_full_tx_objects = false;
-			let header = Ethereum::get_block_by_hash(&*client, hash, get_full_tx_objects).await?;
-			match header.number.is_some() && header.hash.is_some() && header.logs_bloom.is_some() {
-				true => Ok(header),
-				false => Err(Error::IncompleteHeader),
-			}
-		})
-		.await
-	}
-
-	/// Retrieve block header and its transactions by its number from Ethereum node.
-	pub async fn header_by_number_with_transactions(
-		&self,
-		number: u64,
-	) -> Result<HeaderWithTransactions> {
-		self.jsonrpsee_execute(move |client| async move {
-			let get_full_tx_objects = true;
-			let header = Ethereum::get_block_by_number_with_transactions(
-				&*client,
-				number,
-				get_full_tx_objects,
-			)
-			.await?;
-
-			let is_complete_header =
-				header.number.is_some() && header.hash.is_some() && header.logs_bloom.is_some();
-			if !is_complete_header {
-				return Err(Error::IncompleteHeader)
-			}
-
-			let is_complete_transactions = header.transactions.iter().all(|tx| tx.raw.is_some());
-			if !is_complete_transactions {
-				return Err(Error::IncompleteTransaction)
-			}
-
-			Ok(header)
-		})
-		.await
-	}
-
-	/// Retrieve block header and its transactions by its hash from Ethereum node.
-	pub async fn header_by_hash_with_transactions(
-		&self,
-		hash: H256,
-	) -> Result<HeaderWithTransactions> {
-		self.jsonrpsee_execute(move |client| async move {
-			let get_full_tx_objects = true;
-			let header =
-				Ethereum::get_block_by_hash_with_transactions(&*client, hash, get_full_tx_objects)
-					.await?;
-
-			let is_complete_header =
-				header.number.is_some() && header.hash.is_some() && header.logs_bloom.is_some();
-			if !is_complete_header {
-				return Err(Error::IncompleteHeader)
-			}
-
-			let is_complete_transactions = header.transactions.iter().all(|tx| tx.raw.is_some());
-			if !is_complete_transactions {
-				return Err(Error::IncompleteTransaction)
-			}
-
-			Ok(header)
-		})
-		.await
-	}
-
-	/// Retrieve transaction by its hash from Ethereum node.
-	pub async fn transaction_by_hash(&self, hash: H256) -> Result<Option<Transaction>> {
-		self.jsonrpsee_execute(move |client| async move {
-			Ok(Ethereum::transaction_by_hash(&*client, hash).await?)
-		})
-		.await
-	}
-
-	/// Retrieve transaction receipt by transaction hash.
-	pub async fn transaction_receipt(&self, transaction_hash: H256) -> Result<Receipt> {
-		self.jsonrpsee_execute(move |client| async move {
-			Ok(Ethereum::get_transaction_receipt(&*client, transaction_hash).await?)
-		})
-		.await
-	}
-
-	/// Get the nonce of the given account.
-	pub async fn account_nonce(&self, address: Address) -> Result<U256> {
-		self.jsonrpsee_execute(move |client| async move {
-			Ok(Ethereum::get_transaction_count(&*client, address).await?)
-		})
-		.await
-	}
-
-	/// Submit an Ethereum transaction.
-	///
-	/// The transaction must already be signed before sending it through this method.
-	pub async fn submit_transaction(&self, signed_raw_tx: SignedRawTx) -> Result<TransactionHash> {
-		self.jsonrpsee_execute(move |client| async move {
-			let transaction = Bytes(signed_raw_tx);
-			let tx_hash = Ethereum::submit_transaction(&*client, transaction).await?;
-			log::trace!(target: "bridge", "Sent transaction to Ethereum node: {:?}", tx_hash);
-			Ok(tx_hash)
-		})
-		.await
-	}
-
-	/// Call Ethereum smart contract.
-	pub async fn eth_call(&self, call_transaction: CallRequest) -> Result<Bytes> {
-		self.jsonrpsee_execute(move |client| async move {
-			Ok(Ethereum::call(&*client, call_transaction).await?)
-		})
-		.await
-	}
-
-	/// Execute jsonrpsee future in tokio context.
-	async fn jsonrpsee_execute<MF, F, T>(&self, make_jsonrpsee_future: MF) -> Result<T>
-	where
-		MF: FnOnce(Arc<RpcClient>) -> F + Send + 'static,
-		F: Future<Output = Result<T>> + Send,
-		T: Send + 'static,
-	{
-		let client = self.client.clone();
-		self.tokio.spawn(async move { make_jsonrpsee_future(client).await }).await?
-	}
-}
diff --git a/bridges/relays/client-ethereum/src/error.rs b/bridges/relays/client-ethereum/src/error.rs
deleted file mode 100644
index 6323b708fc02..000000000000
--- a/bridges/relays/client-ethereum/src/error.rs
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2019-2021 Parity Technologies (UK) Ltd.
-// This file is part of Parity Bridges Common.
-
-// Parity Bridges Common is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Parity Bridges Common is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
-
-//! Ethereum node RPC errors.
-
-use crate::types::U256;
-
-use jsonrpsee_ws_client::types::Error as RpcError;
-use relay_utils::MaybeConnectionError;
-use thiserror::Error;
-
-/// Result type used by Ethereum client.
-pub type Result<T> = std::result::Result<T, Error>;
-
-/// Errors that can occur only when interacting with
-/// an Ethereum node through RPC.
-#[derive(Debug, Error)]
-pub enum Error {
-	/// IO error.
-	#[error("IO error: {0}")]
-	Io(#[from] std::io::Error),
-	/// An error that can occur when making an HTTP request to
-	/// an JSON-RPC client.
-	#[error("RPC error: {0}")]
-	RpcError(#[from] RpcError),
-	/// Failed to parse response.
-	#[error("Response parse failed: {0}")]
-	ResponseParseFailed(String),
-	/// We have received a header with missing fields.
-	#[error("Incomplete Ethereum Header Received (missing some of required fields - hash, number, logs_bloom).")]
-	IncompleteHeader,
-	/// We have received a transaction missing a `raw` field.
-	#[error("Incomplete Ethereum Transaction (missing required field - raw).")]
-	IncompleteTransaction,
-	/// An invalid Substrate block number was received from
-	/// an Ethereum node.
-	#[error("Received an invalid Substrate block from Ethereum Node.")]
-	InvalidSubstrateBlockNumber,
-	/// An invalid index has been received from an Ethereum node.
-	#[error("Received an invalid incomplete index from Ethereum Node.")]
-	InvalidIncompleteIndex,
-	/// The client we're connected to is not synced, so we can't rely on its state. Contains
-	/// number of unsynced headers.
-	#[error("Ethereum client is not synced: syncing {0} headers.")]
-	ClientNotSynced(U256),
-	/// Custom logic error.
-	#[error("{0}")]
-	Custom(String),
-}
-
-impl From<tokio::task::JoinError> for Error {
-	fn from(error: tokio::task::JoinError) -> Self {
-		Error::Custom(format!("Failed to wait tokio task: {}", error))
-	}
-}
-
-impl MaybeConnectionError for Error {
-	fn is_connection_error(&self) -> bool {
-		matches!(
-			*self,
-			Error::RpcError(RpcError::Transport(_))
-			// right now if connection to the ws server is dropped (after it is already established),
-			// we're getting this error
-			| Error::RpcError(RpcError::Internal(_))
-			| Error::RpcError(RpcError::RestartNeeded(_))
-			| Error::ClientNotSynced(_),
-		)
-	}
-}
diff --git a/bridges/relays/client-ethereum/src/lib.rs b/bridges/relays/client-ethereum/src/lib.rs
deleted file mode 100644
index fa4877f8e5cf..000000000000
--- a/bridges/relays/client-ethereum/src/lib.rs
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2019-2021 Parity Technologies (UK) Ltd.
-// This file is part of Parity Bridges Common.
-
-// Parity Bridges Common is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Parity Bridges Common is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
-
-//! Tools to interact with (Open) Ethereum node using RPC methods.
-
-#![warn(missing_docs)]
-
-mod client;
-mod error;
-mod rpc;
-mod sign;
-
-pub use crate::{
-	client::Client,
-	error::{Error, Result},
-	sign::{sign_and_submit_transaction, SigningParams},
-};
-
-pub mod types;
-
-/// Ethereum-over-websocket connection params.
-#[derive(Debug, Clone)]
-pub struct ConnectionParams {
-	/// Websocket server host name.
-	pub host: String,
-	/// Websocket server TCP port.
-	pub port: u16,
-}
-
-impl Default for ConnectionParams {
-	fn default() -> Self {
-		ConnectionParams { host: "localhost".into(), port: 8546 }
-	}
-}
diff --git a/bridges/relays/client-ethereum/src/rpc.rs b/bridges/relays/client-ethereum/src/rpc.rs
deleted file mode 100644
index 2479338b1015..000000000000
--- a/bridges/relays/client-ethereum/src/rpc.rs
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2019-2021 Parity Technologies (UK) Ltd.
-// This file is part of Parity Bridges Common.
-
-// Parity Bridges Common is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Parity Bridges Common is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
-
-//! Ethereum node RPC interface.
-
-use crate::types::{
-	Address, Bytes, CallRequest, Header, HeaderWithTransactions, Receipt, SyncState, Transaction,
-	TransactionHash, H256, U256, U64,
-};
-
-jsonrpsee_proc_macros::rpc_client_api! {
-	pub(crate) Ethereum {
-		#[rpc(method = "eth_syncing", positional_params)]
-		fn syncing() -> SyncState;
-		#[rpc(method = "eth_estimateGas", positional_params)]
-		fn estimate_gas(call_request: CallRequest) -> U256;
-		#[rpc(method = "eth_blockNumber", positional_params)]
-		fn block_number() -> U64;
-		#[rpc(method = "eth_getBlockByNumber", positional_params)]
-		fn get_block_by_number(block_number: U64, full_tx_objs: bool) -> Header;
-		#[rpc(method = "eth_getBlockByHash", positional_params)]
-		fn get_block_by_hash(hash: H256, full_tx_objs: bool) -> Header;
-		#[rpc(method = "eth_getBlockByNumber", positional_params)]
-		fn get_block_by_number_with_transactions(number: U64, full_tx_objs: bool) -> HeaderWithTransactions;
-		#[rpc(method = "eth_getBlockByHash", positional_params)]
-		fn get_block_by_hash_with_transactions(hash: H256, full_tx_objs: bool) -> HeaderWithTransactions;
-		#[rpc(method = "eth_getTransactionByHash", positional_params)]
-		fn transaction_by_hash(hash: H256) -> Option<Transaction>;
-		#[rpc(method = "eth_getTransactionReceipt", positional_params)]
-		fn get_transaction_receipt(transaction_hash: H256) -> Receipt;
-		#[rpc(method = "eth_getTransactionCount", positional_params)]
-		fn get_transaction_count(address: Address) -> U256;
-		#[rpc(method = "eth_submitTransaction", positional_params)]
-		fn submit_transaction(transaction: Bytes) -> TransactionHash;
-		#[rpc(method = "eth_call", positional_params)]
-		fn call(transaction_call: CallRequest) -> Bytes;
-	}
-}
diff --git a/bridges/relays/client-ethereum/src/sign.rs b/bridges/relays/client-ethereum/src/sign.rs
deleted file mode 100644
index 86ddcc871c40..000000000000
--- a/bridges/relays/client-ethereum/src/sign.rs
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2019-2021 Parity Technologies (UK) Ltd.
-// This file is part of Parity Bridges Common.
-
-// Parity Bridges Common is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Parity Bridges Common is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
-
-use crate::{
-	types::{Address, CallRequest, U256},
-	Client, Result,
-};
-use bp_eth_poa::signatures::{secret_to_address, SignTransaction};
-use hex_literal::hex;
-use libsecp256k1::SecretKey;
-
-/// Ethereum signing params.
-#[derive(Clone, Debug)]
-pub struct SigningParams {
-	/// Ethereum chain id.
-	pub chain_id: u64,
-	/// Ethereum transactions signer.
-	pub signer: SecretKey,
-	/// Gas price we agree to pay.
-	pub gas_price: U256,
-}
-
-impl Default for SigningParams {
-	fn default() -> Self {
-		SigningParams {
-			chain_id: 0x11, // Parity dev chain
-			// account that has a lot of ether when we run instant seal engine
-			// address: 0x00a329c0648769a73afac7f9381e08fb43dbea72
-			// secret: 0x4d5db4107d237df6a3d58ee5f70ae63d73d7658d4026f2eefd2f204c81682cb7
-			signer: SecretKey::parse(&hex!(
-				"4d5db4107d237df6a3d58ee5f70ae63d73d7658d4026f2eefd2f204c81682cb7"
-			))
-			.expect("secret is hardcoded, thus valid; qed"),
-			gas_price: 8_000_000_000u64.into(), // 8 Gwei
-		}
-	}
-}
-
-/// Sign and submit transaction using given Ethereum client.
-pub async fn sign_and_submit_transaction(
-	client: &Client,
-	params: &SigningParams,
-	contract_address: Option<Address>,
-	nonce: Option<U256>,
-	double_gas: bool,
-	encoded_call: Vec<u8>,
-) -> Result<()> {
-	let nonce = if let Some(n) = nonce {
-		n
-	} else {
-		let address: Address = secret_to_address(&params.signer);
-		client.account_nonce(address).await?
-	};
-
-	let call_request = CallRequest {
-		to: contract_address,
-		data: Some(encoded_call.clone().into()),
-		..Default::default()
-	};
-	let gas = client.estimate_gas(call_request).await?;
-
-	let raw_transaction = bp_eth_poa::UnsignedTransaction {
-		nonce,
-		to: contract_address,
-		value: U256::zero(),
-		gas: if double_gas { gas.saturating_mul(2.into()) } else { gas },
-		gas_price: params.gas_price,
-		payload: encoded_call,
-	}
-	.sign_by(&params.signer, Some(params.chain_id));
-
-	let _ = client.submit_transaction(raw_transaction).await?;
-	Ok(())
-}
diff --git a/bridges/relays/client-ethereum/src/types.rs b/bridges/relays/client-ethereum/src/types.rs
deleted file mode 100644
index f589474aff1b..000000000000
--- a/bridges/relays/client-ethereum/src/types.rs
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2019-2021 Parity Technologies (UK) Ltd.
-// This file is part of Parity Bridges Common.
-
-// Parity Bridges Common is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Parity Bridges Common is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
-
-//! Common types that are used in relay <-> Ethereum node communications.
-
-use headers_relay::sync_types::SourceHeader;
-
-pub use web3::types::{Address, Bytes, CallRequest, SyncState, H256, U128, U256, U64};
-
-/// When header is just received from the Ethereum node, we check that it has
-/// both number and hash fields filled.
-pub const HEADER_ID_PROOF: &str = "checked on retrieval; qed";
-
-/// Ethereum transaction hash type.
-pub type HeaderHash = H256;
-
-/// Ethereum transaction hash type.
-pub type TransactionHash = H256;
-
-/// Ethereum transaction type.
-pub type Transaction = web3::types::Transaction;
-
-/// Ethereum header type.
-pub type Header = web3::types::Block<H256>;
-
-/// Ethereum header type used in headers sync.
-#[derive(Clone, Debug, PartialEq)]
-pub struct SyncHeader(Header);
-
-impl std::ops::Deref for SyncHeader {
-	type Target = Header;
-
-	fn deref(&self) -> &Self::Target {
-		&self.0
-	}
-}
-
-/// Ethereum header with transactions type.
-pub type HeaderWithTransactions = web3::types::Block<Transaction>;
-
-/// Ethereum transaction receipt type.
-pub type Receipt = web3::types::TransactionReceipt;
-
-/// Ethereum header ID.
-pub type HeaderId = relay_utils::HeaderId<HeaderHash, u64>;
-
-/// A raw Ethereum transaction that's been signed.
-pub type SignedRawTx = Vec<u8>;
-
-impl From<Header> for SyncHeader {
-	fn from(header: Header) -> Self {
-		Self(header)
-	}
-}
-
-impl SourceHeader<HeaderHash, u64> for SyncHeader {
-	fn id(&self) -> HeaderId {
-		relay_utils::HeaderId(
-			self.number.expect(HEADER_ID_PROOF).as_u64(),
-			self.hash.expect(HEADER_ID_PROOF),
-		)
-	}
-
-	fn parent_id(&self) -> HeaderId {
-		relay_utils::HeaderId(self.number.expect(HEADER_ID_PROOF).as_u64() - 1, self.parent_hash)
-	}
-}