From 43d8c8787ded25dab5db55af5ab65f5d5882a51b Mon Sep 17 00:00:00 2001 From: Roman Gafiyatullin Date: Fri, 6 May 2022 13:07:44 +0300 Subject: [PATCH] Remove the `--unsafe-pruning` CLI-argument (step 1) (#10995) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * sc-client-db: utils::open_database(...) — return OpenDbError so that the caller can clearly distinguish the `OpenDbError::DoesNotExist` case * sc-client-db: utils::open_database(..) — accept the `create: bool` argument * sc-client-db: pruning — optional argument in the DatabaseSettings * sc-state-db: Split `Error` into separate `Error` and `StateDbError` * StateDb::open: choose the pruning-mode depending on the requested and stored values * sc-state-db: test for different combinations of stored and requested pruning-modes * CLI-argument: mark the unsafe-pruning as deprecated * Fix tests * tests: do not specify --pruning when running substrate over the existing storage * fix types for benches * cargo fmt * Check whether the pruning-mode and sync-mode are compatible * cargo fmt * parity-db: 0.3.11 -> 0.3.12 * sc-state-db: MetaDb::set_meta — a better doc-test * cargo fmt * make MetaDb read-only again! * Remove the stray newline (and run the CI once again please) * Last nitpicks * A more comprehensive error message --- Cargo.lock | 6 +- bin/node/cli/benches/block_production.rs | 2 +- bin/node/cli/benches/transaction_pool.rs | 2 +- bin/node/cli/tests/benchmark_block_works.rs | 1 - bin/node/cli/tests/check_block_works.rs | 2 +- bin/node/cli/tests/export_import_flow.rs | 6 +- bin/node/cli/tests/inspect_works.rs | 2 +- bin/node/testing/src/bench.rs | 2 +- client/api/src/backend.rs | 3 + client/api/src/client.rs | 3 + client/api/src/in_mem.rs | 4 + client/cli/src/config.rs | 19 +- client/cli/src/params/import_params.rs | 12 +- client/cli/src/params/pruning_params.rs | 36 +-- client/db/src/lib.rs | 81 ++++-- client/db/src/upgrade.rs | 13 +- client/db/src/utils.rs | 168 ++++++------ client/service/src/builder.rs | 6 +- client/service/src/client/client.rs | 4 + client/service/src/config.rs | 2 +- client/service/test/src/client/mod.rs | 4 +- client/state-db/src/lib.rs | 280 ++++++++++++++++---- client/state-db/src/noncanonical.rs | 226 +++++++--------- 23 files changed, 546 insertions(+), 338 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 67a9372b367ea..14d1a547a8073 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11680,7 +11680,7 @@ dependencies = [ "chrono", "lazy_static", "matchers", - "parking_lot 0.9.0", + "parking_lot 0.11.2", "regex", "serde", "serde_json", @@ -11854,9 +11854,9 @@ version = "1.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ee73e6e4924fe940354b8d4d98cad5231175d615cd855b758adc658c0aac6a0" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "digest 0.10.3", - "rand 0.6.5", + "rand 0.8.4", "static_assertions", ] diff --git a/bin/node/cli/benches/block_production.rs b/bin/node/cli/benches/block_production.rs index ebb89c07da221..6eab08c39e5a2 100644 --- a/bin/node/cli/benches/block_production.rs +++ b/bin/node/cli/benches/block_production.rs @@ -74,7 +74,7 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 }, state_cache_size: 67108864, state_cache_child_ratio: None, - state_pruning: PruningMode::ArchiveAll, + state_pruning: Some(PruningMode::ArchiveAll), keep_blocks: KeepBlocks::All, chain_spec: spec, wasm_method: WasmExecutionMethod::Compiled, diff
--git a/bin/node/cli/benches/transaction_pool.rs b/bin/node/cli/benches/transaction_pool.rs index a889399eda83a..eb0e24d2fdd37 100644 --- a/bin/node/cli/benches/transaction_pool.rs +++ b/bin/node/cli/benches/transaction_pool.rs @@ -65,7 +65,7 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 }, state_cache_size: 67108864, state_cache_child_ratio: None, - state_pruning: PruningMode::ArchiveAll, + state_pruning: Some(PruningMode::ArchiveAll), keep_blocks: KeepBlocks::All, chain_spec: spec, wasm_method: WasmExecutionMethod::Interpreted, diff --git a/bin/node/cli/tests/benchmark_block_works.rs b/bin/node/cli/tests/benchmark_block_works.rs index 359abf3e4265f..65cb474ea88d6 100644 --- a/bin/node/cli/tests/benchmark_block_works.rs +++ b/bin/node/cli/tests/benchmark_block_works.rs @@ -37,7 +37,6 @@ async fn benchmark_block_works() { .args(["benchmark", "block", "--dev"]) .arg("-d") .arg(base_dir.path()) - .args(["--pruning", "archive"]) .args(["--from", "1", "--to", "1"]) .args(["--repeat", "1"]) .args(["--execution", "wasm", "--wasm-execution", "compiled"]) diff --git a/bin/node/cli/tests/check_block_works.rs b/bin/node/cli/tests/check_block_works.rs index c5447fd2311c6..d4afc530bbcb3 100644 --- a/bin/node/cli/tests/check_block_works.rs +++ b/bin/node/cli/tests/check_block_works.rs @@ -31,7 +31,7 @@ async fn check_block_works() { common::run_node_for_a_while(base_path.path(), &["--dev", "--no-hardware-benchmarks"]).await; let status = Command::new(cargo_bin("substrate")) - .args(&["check-block", "--dev", "--pruning", "archive", "-d"]) + .args(&["check-block", "--dev", "-d"]) .arg(base_path.path()) .arg("1") .status() diff --git a/bin/node/cli/tests/export_import_flow.rs b/bin/node/cli/tests/export_import_flow.rs index 48fccc8ca0293..750b4f7acc121 100644 --- a/bin/node/cli/tests/export_import_flow.rs +++ b/bin/node/cli/tests/export_import_flow.rs @@ -79,9 +79,9 @@ impl<'a> ExportImportRevertExecutor<'a> { // Adding "--binary" if need be. let arguments: Vec<&str> = match format_opt { FormatOpt::Binary => { - vec![&sub_command_str, "--dev", "--pruning", "archive", "--binary", "-d"] + vec![&sub_command_str, "--dev", "--binary", "-d"] }, - FormatOpt::Json => vec![&sub_command_str, "--dev", "--pruning", "archive", "-d"], + FormatOpt::Json => vec![&sub_command_str, "--dev", "-d"], }; let tmp: TempDir; @@ -161,7 +161,7 @@ impl<'a> ExportImportRevertExecutor<'a> { /// Runs the `revert` command. 
fn run_revert(&self) { let output = Command::new(cargo_bin("substrate")) - .args(&["revert", "--dev", "--pruning", "archive", "-d"]) + .args(&["revert", "--dev", "-d"]) .arg(&self.base_path.path()) .output() .unwrap(); diff --git a/bin/node/cli/tests/inspect_works.rs b/bin/node/cli/tests/inspect_works.rs index 6f73cc69582a9..849fb913a18d0 100644 --- a/bin/node/cli/tests/inspect_works.rs +++ b/bin/node/cli/tests/inspect_works.rs @@ -31,7 +31,7 @@ async fn inspect_works() { common::run_node_for_a_while(base_path.path(), &["--dev", "--no-hardware-benchmarks"]).await; let status = Command::new(cargo_bin("substrate")) - .args(&["inspect", "--dev", "--pruning", "archive", "-d"]) + .args(&["inspect", "--dev", "-d"]) .arg(base_path.path()) .args(&["block", "1"]) .status() diff --git a/bin/node/testing/src/bench.rs b/bin/node/testing/src/bench.rs index 7e13c0a0ac5e0..e5287dc3c4af2 100644 --- a/bin/node/testing/src/bench.rs +++ b/bin/node/testing/src/bench.rs @@ -389,7 +389,7 @@ impl BenchDb { let db_config = sc_client_db::DatabaseSettings { state_cache_size: 16 * 1024 * 1024, state_cache_child_ratio: Some((0, 100)), - state_pruning: PruningMode::ArchiveAll, + state_pruning: Some(PruningMode::ArchiveAll), source: database_type.into_settings(dir.into()), keep_blocks: sc_client_db::KeepBlocks::All, }; diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index 394fcd420fda1..af8552886b72e 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -546,6 +546,9 @@ pub trait Backend: AuxStore + Send + Sync { /// something that the import of a block would interfere with, e.g. importing /// a new block or calculating the best head. fn get_import_lock(&self) -> &RwLock<()>; + + /// Tells whether the backend requires full-sync mode. + fn requires_full_sync(&self) -> bool; } /// Mark for all Backend implementations, that are making use of state data, stored locally. diff --git a/client/api/src/client.rs b/client/api/src/client.rs index c4b01fbd0abbd..b809e0ee61032 100644 --- a/client/api/src/client.rs +++ b/client/api/src/client.rs @@ -146,6 +146,9 @@ pub trait BlockBackend { fn has_indexed_transaction(&self, hash: &Block::Hash) -> sp_blockchain::Result { Ok(self.indexed_transaction(hash)?.is_some()) } + + /// Tells whether the current client configuration requires full-sync mode. + fn requires_full_sync(&self) -> bool; } /// Provide a list of potential uncle headers for a given block. diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index 8b8473287a7ca..a8a7442a8ef9f 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -795,6 +795,10 @@ where fn get_import_lock(&self) -> &RwLock<()> { &self.import_lock } + + fn requires_full_sync(&self) -> bool { + false + } } impl backend::LocalBackend for Backend where Block::Hash: Ord {} diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index 5c44a05ab68dd..aef1da8193757 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -251,9 +251,9 @@ pub trait CliConfiguration: Sized { /// /// By default this is retrieved from `PruningMode` if it is available. Otherwise its /// `PruningMode::default()`. 
- fn state_pruning(&self, unsafe_pruning: bool, role: &Role) -> Result<PruningMode> { + fn state_pruning(&self) -> Result<Option<PruningMode>> { self.pruning_params() - .map(|x| x.state_pruning(unsafe_pruning, role)) + .map(|x| x.state_pruning()) .unwrap_or_else(|| Ok(Default::default())) } @@ -494,8 +494,6 @@ pub trait CliConfiguration: Sized { let telemetry_endpoints = self.telemetry_endpoints(&chain_spec)?; let runtime_cache_size = self.runtime_cache_size()?; - let unsafe_pruning = self.import_params().map(|p| p.unsafe_pruning).unwrap_or(false); - Ok(Configuration { impl_name: C::impl_name(), impl_version: C::impl_version(), @@ -516,7 +514,7 @@ pub trait CliConfiguration: Sized { database: self.database_config(&config_dir, database_cache_size, database, &role)?, state_cache_size: self.state_cache_size()?, state_cache_child_ratio: self.state_cache_child_ratio()?, - state_pruning: self.state_pruning(unsafe_pruning, &role)?, + state_pruning: self.state_pruning()?, keep_blocks: self.keep_blocks()?, wasm_method: self.wasm_method()?, wasm_runtime_overrides: self.wasm_runtime_overrides(), @@ -643,6 +641,17 @@ pub trait CliConfiguration: Sized { } } + if self.import_params().map_or(false, |p| { + #[allow(deprecated)] + p.unsafe_pruning + }) { + // according to https://github.com/paritytech/substrate/issues/8103; + warn!( + "WARNING: \"--unsafe-pruning\" CLI-flag is deprecated and has no effect. \ + In future builds it will be removed, and providing this flag will lead to an error." + ); + } + Ok(()) } } diff --git a/client/cli/src/params/import_params.rs b/client/cli/src/params/import_params.rs index 1df11cff8d79f..4c9b334150557 100644 --- a/client/cli/src/params/import_params.rs +++ b/client/cli/src/params/import_params.rs @@ -40,12 +40,16 @@ pub struct ImportParams { #[clap(flatten)] pub database_params: DatabaseParams, - /// Force start with unsafe pruning settings. + /// THIS IS A DEPRECATED CLI-ARGUMENT. /// - /// When running as a validator it is highly recommended to disable state - /// pruning (i.e. 'archive') which is the default. The node will refuse to - /// start as a validator if pruning is enabled unless this option is set. + /// It has been preserved in order not to break compatibility with existing scripts. + /// Enabling this option will lead to a runtime warning. + /// In a future release this option will be removed completely, and specifying it will lead + /// to a startup error. + /// + /// Details: <https://github.com/paritytech/substrate/issues/8103> #[clap(long)] + #[deprecated = "According to https://github.com/paritytech/substrate/issues/8103"] pub unsafe_pruning: bool, /// Method for executing Wasm runtime code. diff --git a/client/cli/src/params/pruning_params.rs b/client/cli/src/params/pruning_params.rs index de9628ecf7ad9..0f3d1013381e6 100644 --- a/client/cli/src/params/pruning_params.rs +++ b/client/cli/src/params/pruning_params.rs @@ -18,7 +18,7 @@ use crate::error; use clap::Args; -use sc_service::{KeepBlocks, PruningMode, Role}; +use sc_service::{KeepBlocks, PruningMode}; /// Parameters to define the pruning mode #[derive(Debug, Clone, PartialEq, Args)] @@ -39,29 +39,17 @@ pub struct PruningParams { impl PruningParams { /// Get the pruning value from the parameters - pub fn state_pruning(&self, unsafe_pruning: bool, role: &Role) -> error::Result<PruningMode> { - // by default we disable pruning if the node is an authority (i.e. - // `ArchiveAll`), otherwise we keep state for the last 256 blocks. if the - // node is an authority and pruning is enabled explicitly, then we error - // unless `unsafe_pruning` is set.
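[Editor's note: an illustrative sketch, not part of the patch. The replacement below derives the requested mode purely from the optional `--pruning` string; `params` stands for an assumed `PruningParams` value, and the `expect` is shorthand for the crate's error handling.]

// Minimal sketch of the new parsing behaviour (names are illustrative):
let requested: Option<PruningMode> = match params.pruning.as_deref() {
	None => None, // no `--pruning` given: defer to the stored mode or the default
	Some("archive") => Some(PruningMode::ArchiveAll),
	// e.g. `--pruning 1000` keeps state for the last 1000 blocks
	Some(n) => Some(PruningMode::keep_blocks(n.parse().expect("invalid pruning mode"))),
};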
- Ok(match &self.pruning { - Some(ref s) if s == "archive" => PruningMode::ArchiveAll, - None if role.is_authority() => PruningMode::ArchiveAll, - None => PruningMode::default(), - Some(s) => { - if role.is_authority() && !unsafe_pruning { - return Err(error::Error::Input( - "Validators should run with state pruning disabled (i.e. archive). \ - You can ignore this check with `--unsafe-pruning`." - .to_string(), - )) - } - - PruningMode::keep_blocks(s.parse().map_err(|_| { - error::Error::Input("Invalid pruning mode specified".to_string()) - })?) - }, - }) + pub fn state_pruning(&self) -> error::Result> { + self.pruning + .as_ref() + .map(|s| match s.as_str() { + "archive" => Ok(PruningMode::ArchiveAll), + bc => bc + .parse() + .map_err(|_| error::Error::Input("Invalid pruning mode specified".to_string())) + .map(PruningMode::keep_blocks), + }) + .transpose() } /// Get the block pruning value from the parameters diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 72422eb82d6dd..a32a666c3c980 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -296,8 +296,8 @@ pub struct DatabaseSettings { pub state_cache_size: usize, /// Ratio of cache size dedicated to child tries. pub state_cache_child_ratio: Option<(usize, usize)>, - /// State pruning mode. - pub state_pruning: PruningMode, + /// Requested state pruning mode. + pub state_pruning: Option, /// Where to find the database. pub source: DatabaseSource, /// Block pruning mode. @@ -341,7 +341,13 @@ pub enum DatabaseSource { }, /// Use a custom already-open database. - Custom(Arc>), + Custom { + /// the handle to the custom storage + db: Arc>, + + /// if set, the `create` flag will be required to open such datasource + require_create_flag: bool, + }, } impl DatabaseSource { @@ -354,7 +360,7 @@ impl DatabaseSource { // I would think rocksdb, but later parity-db. DatabaseSource::Auto { paritydb_path, .. } => Some(paritydb_path), DatabaseSource::RocksDb { path, .. } | DatabaseSource::ParityDb { path } => Some(path), - DatabaseSource::Custom(..) => None, + DatabaseSource::Custom { .. } => None, } } @@ -370,7 +376,7 @@ impl DatabaseSource { *path = p.into(); true }, - DatabaseSource::Custom(..) => false, + DatabaseSource::Custom { .. } => false, } } } @@ -381,7 +387,7 @@ impl std::fmt::Display for DatabaseSource { DatabaseSource::Auto { .. } => "Auto", DatabaseSource::RocksDb { .. } => "RocksDb", DatabaseSource::ParityDb { .. } => "ParityDb", - DatabaseSource::Custom(_) => "Custom", + DatabaseSource::Custom { .. } => "Custom", }; write!(f, "{}", name) } @@ -416,7 +422,7 @@ struct PendingBlock { struct StateMetaDb<'a>(&'a dyn Database); impl<'a> sc_state_db::MetaDb for StateMetaDb<'a> { - type Error = io::Error; + type Error = sp_database::error::DatabaseError; fn get_meta(&self, key: &[u8]) -> Result>, Self::Error> { Ok(self.0.get(columns::STATE_META, key)) @@ -1009,9 +1015,23 @@ impl Backend { /// Create a new instance of database backend. /// /// The pruning window is how old a block must be before the state is pruned. 
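[Editor's note: a minimal sketch of the probe-then-create pattern that `Backend::new` adopts below, assuming `Block` and `db_source` are in scope.]

// Probe for an existing database first; only create one if nothing is on disk.
let (needs_init, db) = match open_database::<Block>(&db_source, DatabaseType::Full, false) {
	Ok(db) => (false, db), // the database already exists: reuse it
	Err(OpenDbError::DoesNotExist) =>
		(true, open_database::<Block>(&db_source, DatabaseType::Full, true)?),
	Err(other) => return Err(other.into()), // any other failure is fatal
};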
- pub fn new(config: DatabaseSettings, canonicalization_delay: u64) -> ClientResult { - let db = crate::utils::open_database::(&config, DatabaseType::Full)?; - Self::from_database(db as Arc<_>, canonicalization_delay, &config) + pub fn new(db_config: DatabaseSettings, canonicalization_delay: u64) -> ClientResult { + use utils::OpenDbError; + + let db_source = &db_config.source; + + let (needs_init, db) = + match crate::utils::open_database::(db_source, DatabaseType::Full, false) { + Ok(db) => (false, db), + Err(OpenDbError::DoesNotExist) => { + let db = + crate::utils::open_database::(db_source, DatabaseType::Full, true)?; + (true, db) + }, + Err(as_is) => return Err(as_is.into()), + }; + + Self::from_database(db as Arc<_>, canonicalization_delay, &db_config, needs_init) } /// Create new memory-backed client backend for tests. @@ -1028,8 +1048,8 @@ impl Backend { let db_setting = DatabaseSettings { state_cache_size: 16777216, state_cache_child_ratio: Some((50, 100)), - state_pruning: PruningMode::keep_blocks(keep_blocks), - source: DatabaseSource::Custom(db), + state_pruning: Some(PruningMode::keep_blocks(keep_blocks)), + source: DatabaseSource::Custom { db, require_create_flag: true }, keep_blocks: KeepBlocks::Some(keep_blocks), }; @@ -1057,18 +1077,31 @@ impl Backend { db: Arc>, canonicalization_delay: u64, config: &DatabaseSettings, + should_init: bool, ) -> ClientResult { - let is_archive_pruning = config.state_pruning.is_archive(); - let blockchain = BlockchainDb::new(db.clone())?; + let mut db_init_transaction = Transaction::new(); + + let requested_state_pruning = config.state_pruning.clone(); + let state_meta_db = StateMetaDb(db.as_ref()); let map_e = sp_blockchain::Error::from_state_db; - let state_db: StateDb<_, _> = StateDb::new( - config.state_pruning.clone(), + + let (state_db_init_commit_set, state_db) = StateDb::open( + &state_meta_db, + requested_state_pruning, !db.supports_ref_counting(), - &StateMetaDb(&*db), + should_init, ) .map_err(map_e)?; + + apply_state_commit(&mut db_init_transaction, state_db_init_commit_set); + + let state_pruning_used = state_db.pruning_mode(); + let is_archive_pruning = state_pruning_used.is_archive(); + let blockchain = BlockchainDb::new(db.clone())?; + let storage_db = StorageDb { db: db.clone(), state_db, prefix_keys: !db.supports_ref_counting() }; + let offchain_storage = offchain::LocalStorage::new(db.clone()); let backend = Backend { @@ -1105,6 +1138,9 @@ impl Backend { with_state: true, }); } + + db.commit(db_init_transaction)?; + Ok(backend) } @@ -2251,6 +2287,13 @@ impl sc_client_api::backend::Backend for Backend { fn get_import_lock(&self) -> &RwLock<()> { &*self.import_lock } + + fn requires_full_sync(&self) -> bool { + matches!( + self.storage.state_db.pruning_mode(), + PruningMode::ArchiveAll | PruningMode::ArchiveCanonical + ) + } } impl sc_client_api::backend::LocalBackend for Backend {} @@ -2390,8 +2433,8 @@ pub(crate) mod tests { DatabaseSettings { state_cache_size: 16777216, state_cache_child_ratio: Some((50, 100)), - state_pruning: PruningMode::keep_blocks(1), - source: DatabaseSource::Custom(backing), + state_pruning: Some(PruningMode::keep_blocks(1)), + source: DatabaseSource::Custom { db: backing, require_create_flag: false }, keep_blocks: KeepBlocks::All, }, 0, diff --git a/client/db/src/upgrade.rs b/client/db/src/upgrade.rs index cd18554fb0d06..292905663a20b 100644 --- a/client/db/src/upgrade.rs +++ b/client/db/src/upgrade.rs @@ -190,8 +190,7 @@ fn version_file_path(path: &Path) -> PathBuf { #[cfg(test)] mod tests { use 
super::*; - use crate::{tests::Block, DatabaseSettings, DatabaseSource, KeepBlocks}; - use sc_state_db::PruningMode; + use crate::{tests::Block, DatabaseSource}; fn create_db(db_path: &Path, version: Option) { if let Some(version) = version { @@ -203,16 +202,12 @@ mod tests { fn open_database(db_path: &Path, db_type: DatabaseType) -> sp_blockchain::Result<()> { crate::utils::open_database::( - &DatabaseSettings { - state_cache_size: 0, - state_cache_child_ratio: None, - state_pruning: PruningMode::ArchiveAll, - source: DatabaseSource::RocksDb { path: db_path.to_owned(), cache_size: 128 }, - keep_blocks: KeepBlocks::All, - }, + &DatabaseSource::RocksDb { path: db_path.to_owned(), cache_size: 128 }, db_type, + true, ) .map(|_| ()) + .map_err(|e| sp_blockchain::Error::Backend(e.to_string())) } #[test] diff --git a/client/db/src/utils.rs b/client/db/src/utils.rs index d3cb9a994fdd3..0227e4db8bcd0 100644 --- a/client/db/src/utils.rs +++ b/client/db/src/utils.rs @@ -23,7 +23,7 @@ use std::{fmt, fs, io, path::Path, sync::Arc}; use log::{debug, info}; -use crate::{Database, DatabaseSettings, DatabaseSource, DbHash}; +use crate::{Database, DatabaseSource, DbHash}; use codec::Decode; use sp_database::Transaction; use sp_runtime::{ @@ -177,41 +177,42 @@ where }) } -fn backend_err(feat: &'static str) -> sp_blockchain::Error { - sp_blockchain::Error::Backend(feat.to_string()) -} - /// Opens the configured database. pub fn open_database( - config: &DatabaseSettings, + db_source: &DatabaseSource, db_type: DatabaseType, -) -> sp_blockchain::Result>> { + create: bool, +) -> OpenDbResult { // Maybe migrate (copy) the database to a type specific subdirectory to make it // possible that light and full databases coexist // NOTE: This function can be removed in a few releases - maybe_migrate_to_type_subdir::(&config.source, db_type).map_err(|e| { - sp_blockchain::Error::Backend(format!("Error in migration to role subdirectory: {}", e)) - })?; + maybe_migrate_to_type_subdir::(db_source, db_type)?; - open_database_at::(&config.source, db_type) + open_database_at::(db_source, db_type, create) } fn open_database_at( - source: &DatabaseSource, + db_source: &DatabaseSource, db_type: DatabaseType, -) -> sp_blockchain::Result>> { - let db: Arc> = match &source { - DatabaseSource::ParityDb { path } => open_parity_db::(path, db_type, true)?, + create: bool, +) -> OpenDbResult { + let db: Arc> = match &db_source { + DatabaseSource::ParityDb { path } => open_parity_db::(path, db_type, create)?, DatabaseSource::RocksDb { path, cache_size } => - open_kvdb_rocksdb::(path, db_type, true, *cache_size)?, - DatabaseSource::Custom(db) => db.clone(), + open_kvdb_rocksdb::(path, db_type, create, *cache_size)?, + DatabaseSource::Custom { db, require_create_flag } => { + if *require_create_flag && !create { + return Err(OpenDbError::DoesNotExist) + } + db.clone() + }, DatabaseSource::Auto { paritydb_path, rocksdb_path, cache_size } => { // check if rocksdb exists first, if not, open paritydb match open_kvdb_rocksdb::(rocksdb_path, db_type, false, *cache_size) { Ok(db) => db, Err(OpenDbError::NotEnabled(_)) | Err(OpenDbError::DoesNotExist) => - open_parity_db::(paritydb_path, db_type, true)?, - Err(_) => return Err(backend_err("cannot open rocksdb. 
corrupted database")), + open_parity_db::(paritydb_path, db_type, create)?, + Err(as_is) => return Err(as_is), } }, }; @@ -221,12 +222,17 @@ fn open_database_at( } #[derive(Debug)] -enum OpenDbError { +pub enum OpenDbError { // constructed only when rocksdb and paritydb are disabled #[allow(dead_code)] NotEnabled(&'static str), DoesNotExist, Internal(String), + DatabaseError(sp_database::error::DatabaseError), + UnexpectedDbType { + expected: DatabaseType, + found: Vec, + }, } type OpenDbResult = Result>, OpenDbError>; @@ -239,6 +245,17 @@ impl fmt::Display for OpenDbError { OpenDbError::NotEnabled(feat) => { write!(f, "`{}` feature not enabled, database can not be opened", feat) }, + OpenDbError::DatabaseError(db_error) => { + write!(f, "Database Error: {}", db_error) + }, + OpenDbError::UnexpectedDbType { expected, found } => { + write!( + f, + "Unexpected DB-Type. Expected: {:?}, Found: {:?}", + expected.as_str().as_bytes(), + found + ) + }, } } } @@ -356,19 +373,19 @@ fn open_kvdb_rocksdb( pub fn check_database_type( db: &dyn Database, db_type: DatabaseType, -) -> sp_blockchain::Result<()> { +) -> Result<(), OpenDbError> { match db.get(COLUMN_META, meta_keys::TYPE) { Some(stored_type) => if db_type.as_str().as_bytes() != &*stored_type { - return Err(sp_blockchain::Error::Backend(format!( - "Unexpected database type. Expected: {}", - db_type.as_str() - ))) + return Err(OpenDbError::UnexpectedDbType { + expected: db_type, + found: stored_type.to_owned(), + }) }, None => { let mut transaction = Transaction::new(); transaction.set(COLUMN_META, meta_keys::TYPE, db_type.as_str().as_bytes()); - db.commit(transaction)?; + db.commit(transaction).map_err(OpenDbError::DatabaseError)?; }, } @@ -378,7 +395,7 @@ pub fn check_database_type( fn maybe_migrate_to_type_subdir( source: &DatabaseSource, db_type: DatabaseType, -) -> io::Result<()> { +) -> Result<(), OpenDbError> { if let Some(p) = source.path() { let mut basedir = p.to_path_buf(); basedir.pop(); @@ -393,14 +410,14 @@ fn maybe_migrate_to_type_subdir( // database stored in the target directory and close the database on success. 
let mut old_source = source.clone(); old_source.set_path(&basedir); - open_database_at::(&old_source, db_type) - .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + open_database_at::(&old_source, db_type, false)?; info!( "Migrating database to a database-type-based subdirectory: '{:?}' -> '{:?}'", basedir, basedir.join(db_type.as_str()) ); + let mut tmp_dir = basedir.clone(); tmp_dir.pop(); tmp_dir.push("tmp"); @@ -580,9 +597,7 @@ impl<'a, 'b> codec::Input for JoinInput<'a, 'b> { #[cfg(test)] mod tests { use super::*; - use crate::KeepBlocks; use codec::Input; - use sc_state_db::PruningMode; use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper}; use std::path::PathBuf; type Block = RawBlock>; @@ -601,18 +616,17 @@ mod tests { let old_db_path = base_path.path().join("chains/dev/db"); source.set_path(&old_db_path); - let settings = db_settings(source.clone()); { - let db_res = open_database::(&settings, db_type); + let db_res = open_database::(&source, db_type, true); assert!(db_res.is_ok(), "New database should be created."); assert!(old_db_path.join(db_check_file).exists()); assert!(!old_db_path.join(db_type.as_str()).join("db_version").exists()); } source.set_path(&old_db_path.join(db_type.as_str())); - let settings = db_settings(source); - let db_res = open_database::(&settings, db_type); + + let db_res = open_database::(&source, db_type, true); assert!(db_res.is_ok(), "Reopening the db with the same role should work"); // check if the database dir had been migrated assert!(!old_db_path.join(db_check_file).exists()); @@ -638,9 +652,8 @@ mod tests { let old_db_path = base_path.path().join("chains/dev/db"); let source = DatabaseSource::RocksDb { path: old_db_path.clone(), cache_size: 128 }; - let settings = db_settings(source); { - let db_res = open_database::(&settings, DatabaseType::Full); + let db_res = open_database::(&source, DatabaseType::Full, true); assert!(db_res.is_ok(), "New database should be created."); // check if the database dir had been migrated @@ -689,16 +702,6 @@ mod tests { assert_eq!(joined.remaining_len().unwrap(), Some(0)); } - fn db_settings(source: DatabaseSource) -> DatabaseSettings { - DatabaseSettings { - state_cache_size: 0, - state_cache_child_ratio: None, - state_pruning: PruningMode::ArchiveAll, - source, - keep_blocks: KeepBlocks::All, - } - } - #[cfg(feature = "with-parity-db")] #[cfg(any(feature = "with-kvdb-rocksdb", test))] #[test] @@ -712,31 +715,36 @@ mod tests { rocksdb_path: rocksdb_path.clone(), cache_size: 128, }; - let mut settings = db_settings(source); // it should create new auto (paritydb) database { - let db_res = open_database::(&settings, DatabaseType::Full); + let db_res = open_database::(&source, DatabaseType::Full, true); assert!(db_res.is_ok(), "New database should be created."); } // it should reopen existing auto (pairtydb) database { - let db_res = open_database::(&settings, DatabaseType::Full); + let db_res = open_database::(&source, DatabaseType::Full, true); assert!(db_res.is_ok(), "Existing parity database should be reopened"); } // it should fail to open existing auto (pairtydb) database { - settings.source = DatabaseSource::RocksDb { path: rocksdb_path, cache_size: 128 }; - let db_res = open_database::(&settings, DatabaseType::Full); + let db_res = open_database::( + &DatabaseSource::RocksDb { path: rocksdb_path, cache_size: 128 }, + DatabaseType::Full, + true, + ); assert!(db_res.is_ok(), "New database should be opened."); } // it should reopen existing auto (pairtydb) database { - settings.source = 
DatabaseSource::ParityDb { path: paritydb_path }; - let db_res = open_database::(&settings, DatabaseType::Full); + let db_res = open_database::( + &DatabaseSource::ParityDb { path: paritydb_path }, + DatabaseType::Full, + true, + ); assert!(db_res.is_ok(), "Existing parity database should be reopened"); } } @@ -751,36 +759,44 @@ mod tests { let rocksdb_path = db_path.join("rocksdb_path"); let source = DatabaseSource::RocksDb { path: rocksdb_path.clone(), cache_size: 128 }; - let mut settings = db_settings(source); // it should create new rocksdb database { - let db_res = open_database::(&settings, DatabaseType::Full); + let db_res = open_database::(&source, DatabaseType::Full, true); assert!(db_res.is_ok(), "New rocksdb database should be created"); } // it should reopen existing auto (rocksdb) database { - settings.source = DatabaseSource::Auto { - paritydb_path: paritydb_path.clone(), - rocksdb_path: rocksdb_path.clone(), - cache_size: 128, - }; - let db_res = open_database::(&settings, DatabaseType::Full); + let db_res = open_database::( + &DatabaseSource::Auto { + paritydb_path: paritydb_path.clone(), + rocksdb_path: rocksdb_path.clone(), + cache_size: 128, + }, + DatabaseType::Full, + true, + ); assert!(db_res.is_ok(), "Existing rocksdb database should be reopened"); } // it should fail to open existing auto (rocksdb) database { - settings.source = DatabaseSource::ParityDb { path: paritydb_path }; - let db_res = open_database::(&settings, DatabaseType::Full); + let db_res = open_database::( + &DatabaseSource::ParityDb { path: paritydb_path }, + DatabaseType::Full, + true, + ); assert!(db_res.is_ok(), "New paritydb database should be created"); } // it should reopen existing auto (pairtydb) database { - settings.source = DatabaseSource::RocksDb { path: rocksdb_path, cache_size: 128 }; - let db_res = open_database::(&settings, DatabaseType::Full); + let db_res = open_database::( + &DatabaseSource::RocksDb { path: rocksdb_path, cache_size: 128 }, + DatabaseType::Full, + true, + ); assert!(db_res.is_ok(), "Existing rocksdb database should be reopened"); } } @@ -795,32 +811,36 @@ mod tests { let rocksdb_path = db_path.join("rocksdb_path"); let source = DatabaseSource::ParityDb { path: paritydb_path.clone() }; - let mut settings = db_settings(source); // it should create new paritydb database { - let db_res = open_database::(&settings, DatabaseType::Full); + let db_res = open_database::(&source, DatabaseType::Full, true); assert!(db_res.is_ok(), "New database should be created."); } // it should reopen existing pairtydb database { - let db_res = open_database::(&settings, DatabaseType::Full); + let db_res = open_database::(&source, DatabaseType::Full, true); assert!(db_res.is_ok(), "Existing parity database should be reopened"); } // it should fail to open existing pairtydb database { - settings.source = - DatabaseSource::RocksDb { path: rocksdb_path.clone(), cache_size: 128 }; - let db_res = open_database::(&settings, DatabaseType::Full); + let db_res = open_database::( + &DatabaseSource::RocksDb { path: rocksdb_path.clone(), cache_size: 128 }, + DatabaseType::Full, + true, + ); assert!(db_res.is_ok(), "New rocksdb database should be created"); } // it should reopen existing auto (pairtydb) database { - settings.source = DatabaseSource::Auto { paritydb_path, rocksdb_path, cache_size: 128 }; - let db_res = open_database::(&settings, DatabaseType::Full); + let db_res = open_database::( + &DatabaseSource::Auto { paritydb_path, rocksdb_path, cache_size: 128 }, + DatabaseType::Full, + true, + 
); assert!(db_res.is_ok(), "Existing parity database should be reopened"); } } diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 35ea67d8fbf85..cabf004b2f707 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -307,7 +307,7 @@ where wasm_runtime_overrides: config.wasm_runtime_overrides.clone(), no_genesis: matches!( config.network.sync_mode, - sc_network::config::SyncMode::Fast { .. } | sc_network::config::SyncMode::Warp + SyncMode::Fast { .. } | SyncMode::Warp { .. } ), wasm_runtime_substitutes, }, @@ -781,12 +781,12 @@ where return Err("Warp sync enabled, but no warp sync provider configured.".into()) } - if config.state_pruning.is_archive() { + if client.requires_full_sync() { match config.network.sync_mode { SyncMode::Fast { .. } => return Err("Fast sync doesn't work for archive nodes".into()), SyncMode::Warp => return Err("Warp sync doesn't work for archive nodes".into()), SyncMode::Full => {}, - }; + } } let transaction_pool_adapter = Arc::new(TransactionPoolAdapter { diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 48e4d0141c4e7..fb73b4c34c040 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -1989,6 +1989,10 @@ where ) -> sp_blockchain::Result>>> { self.backend.blockchain().block_indexed_body(*id) } + + fn requires_full_sync(&self) -> bool { + self.backend.requires_full_sync() + } } impl backend::AuxStore for Client diff --git a/client/service/src/config.rs b/client/service/src/config.rs index 56980ad14425f..e49e8b40a7b1a 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -73,7 +73,7 @@ pub struct Configuration { /// Size in percent of cache size dedicated to child tries pub state_cache_child_ratio: Option, /// State pruning settings. - pub state_pruning: PruningMode, + pub state_pruning: Option, /// Number of blocks to keep in the db. pub keep_blocks: KeepBlocks, /// Chain configuration. diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 6aa047c6393da..136efad088fae 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -1199,7 +1199,7 @@ fn doesnt_import_blocks_that_revert_finality() { DatabaseSettings { state_cache_size: 1 << 20, state_cache_child_ratio: None, - state_pruning: PruningMode::ArchiveAll, + state_pruning: Some(PruningMode::ArchiveAll), keep_blocks: KeepBlocks::All, source: DatabaseSource::RocksDb { path: tmp.path().into(), cache_size: 1024 }, }, @@ -1426,7 +1426,7 @@ fn returns_status_for_pruned_blocks() { DatabaseSettings { state_cache_size: 1 << 20, state_cache_child_ratio: None, - state_pruning: PruningMode::keep_blocks(1), + state_pruning: Some(PruningMode::keep_blocks(1)), keep_blocks: KeepBlocks::All, source: DatabaseSource::RocksDb { path: tmp.path().into(), cache_size: 1024 }, }, diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index 8fe2cdc9d9a85..794dec3b954b7 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -115,9 +115,13 @@ pub trait NodeDb { } /// Error type. -pub enum Error { +pub enum Error { /// Database backend error. Db(E), + StateDb(StateDbError), +} + +pub enum StateDbError { /// `Codec` decoding error. Decoding(codec::Error), /// Trying to canonicalize invalid block. @@ -127,11 +131,19 @@ pub enum Error { /// Trying to insert block with unknown parent. InvalidParent, /// Invalid pruning mode specified. Contains expected mode. 
- InvalidPruningMode(String), + IncompatiblePruningModes { stored: PruningMode, requested: PruningMode }, /// Too many unfinalized sibling blocks inserted. TooManySiblingBlocks, /// Trying to insert existing block. BlockAlreadyExists, + /// Invalid metadata + Metadata(String), +} + +impl From for Error { + fn from(inner: StateDbError) -> Self { + Self::StateDb(inner) + } } /// Pinning error type. @@ -142,21 +154,34 @@ pub enum PinError { impl From for Error { fn from(x: codec::Error) -> Self { - Error::Decoding(x) + StateDbError::Decoding(x).into() } } impl fmt::Debug for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { - Error::Db(e) => e.fmt(f), - Error::Decoding(e) => write!(f, "Error decoding sliceable value: {}", e), - Error::InvalidBlock => write!(f, "Trying to canonicalize invalid block"), - Error::InvalidBlockNumber => write!(f, "Trying to insert block with invalid number"), - Error::InvalidParent => write!(f, "Trying to insert block with unknown parent"), - Error::InvalidPruningMode(e) => write!(f, "Expected pruning mode: {}", e), - Error::TooManySiblingBlocks => write!(f, "Too many sibling blocks inserted"), - Error::BlockAlreadyExists => write!(f, "Block already exists"), + Self::Db(e) => e.fmt(f), + Self::StateDb(e) => e.fmt(f), + } + } +} + +impl fmt::Debug for StateDbError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Self::Decoding(e) => write!(f, "Error decoding sliceable value: {}", e), + Self::InvalidBlock => write!(f, "Trying to canonicalize invalid block"), + Self::InvalidBlockNumber => write!(f, "Trying to insert block with invalid number"), + Self::InvalidParent => write!(f, "Trying to insert block with unknown parent"), + Self::IncompatiblePruningModes { stored, requested } => write!( + f, + "Incompatible pruning modes [stored: {:?}; requested: {:?}]", + stored, requested + ), + Self::TooManySiblingBlocks => write!(f, "Too many sibling blocks inserted"), + Self::BlockAlreadyExists => write!(f, "Block already exists"), + Self::Metadata(message) => write!(f, "Invalid metadata: {}", message), } } } @@ -180,7 +205,7 @@ pub struct CommitSet { } /// Pruning constraints. If none are specified pruning is -#[derive(Default, Debug, Clone, Eq, PartialEq)] +#[derive(Debug, Clone, Eq, PartialEq)] pub struct Constraints { /// Maximum blocks. Defaults to 0 when unspecified, effectively keeping only non-canonical /// states. @@ -222,11 +247,25 @@ impl PruningMode { PruningMode::Constrained(_) => PRUNING_MODE_CONSTRAINED, } } + pub fn from_id(id: &[u8]) -> Option { + match id { + PRUNING_MODE_ARCHIVE => Some(Self::ArchiveAll), + PRUNING_MODE_ARCHIVE_CANON => Some(Self::ArchiveCanonical), + PRUNING_MODE_CONSTRAINED => Some(Self::Constrained(Default::default())), + _ => None, + } + } } impl Default for PruningMode { fn default() -> Self { - PruningMode::keep_blocks(256) + PruningMode::Constrained(Default::default()) + } +} + +impl Default for Constraints { + fn default() -> Self { + Self { max_blocks: Some(256), max_mem: None } } } @@ -251,9 +290,6 @@ impl StateDbSync Result, Error> { trace!(target: "state-db", "StateDb settings: {:?}. Ref-counting: {}", mode, ref_counting); - // Check that settings match - Self::check_meta(&mode, db)?; - let non_canonical: NonCanonicalOverlay = NonCanonicalOverlay::new(db)?; let pruning: Option> = match mode { PruningMode::Constrained(Constraints { max_mem: Some(_), .. 
}) => unimplemented!(), @@ -264,19 +300,6 @@ impl StateDbSync(mode: &PruningMode, db: &D) -> Result<(), Error> { - let db_mode = db.get_meta(&to_meta_key(PRUNING_MODE, &())).map_err(Error::Db)?; - trace!(target: "state-db", - "DB pruning mode: {:?}", - db_mode.as_ref().map(|v| std::str::from_utf8(v)) - ); - match &db_mode { - Some(v) if v.as_slice() == mode.id() => Ok(()), - Some(v) => Err(Error::InvalidPruningMode(String::from_utf8_lossy(v).into())), - None => Ok(()), - } - } - fn insert_block( &mut self, hash: &BlockHash, @@ -284,25 +307,16 @@ impl StateDbSync, ) -> Result, Error> { - let mut meta = ChangeSet::default(); - if number == 0 { - // Save pruning mode when writing first block. - meta.inserted.push((to_meta_key(PRUNING_MODE, &()), self.mode.id().into())); - } - match self.mode { PruningMode::ArchiveAll => { changeset.deleted.clear(); // write changes immediately - Ok(CommitSet { data: changeset, meta }) - }, - PruningMode::Constrained(_) | PruningMode::ArchiveCanonical => { - let commit = self.non_canonical.insert(hash, number, parent_hash, changeset); - commit.map(|mut c| { - c.meta.inserted.extend(meta.inserted); - c - }) + Ok(CommitSet { data: changeset, meta: Default::default() }) }, + PruningMode::Constrained(_) | PruningMode::ArchiveCanonical => self + .non_canonical + .insert(hash, number, parent_hash, changeset) + .map_err(Into::into), } } @@ -319,7 +333,7 @@ impl StateDbSync return Err(e), + Err(e) => return Err(e.into()), }; if let Some(ref mut pruning) = self.pruning { pruning.note_canonical(hash, &mut commit); @@ -480,13 +494,56 @@ pub struct StateDb { } impl StateDb { - /// Creates a new instance. Does not expect any metadata in the database. - pub fn new( - mode: PruningMode, - ref_counting: bool, + /// Create an instance of [`StateDb`]. + pub fn open( db: &D, - ) -> Result, Error> { - Ok(StateDb { db: RwLock::new(StateDbSync::new(mode, ref_counting, db)?) }) + requested_mode: Option, + ref_counting: bool, + should_init: bool, + ) -> Result<(CommitSet, StateDb), Error> + where + D: MetaDb, + { + let stored_mode = fetch_stored_pruning_mode(db)?; + + let selected_mode = match (should_init, stored_mode, requested_mode) { + (true, stored_mode, requested_mode) => { + assert!(stored_mode.is_none(), "The storage has just been initialized. No meta-data is expected to be found in it."); + requested_mode.unwrap_or_default() + }, + + (false, None, _) => + return Err(StateDbError::Metadata( + "An existing StateDb does not have PRUNING_MODE stored in its meta-data".into(), + ) + .into()), + + (false, Some(stored), None) => stored, + + (false, Some(stored), Some(requested)) => choose_pruning_mode(stored, requested)?, + }; + + let db_init_commit_set = if should_init { + let mut cs: CommitSet = Default::default(); + + let key = to_meta_key(PRUNING_MODE, &()); + let value = selected_mode.id().to_owned(); + + cs.meta.inserted.push((key, value)); + + cs + } else { + Default::default() + }; + + let state_db = + StateDb { db: RwLock::new(StateDbSync::new(selected_mode, ref_counting, db)?) }; + + Ok((db_init_commit_set, state_db)) + } + + pub fn pruning_mode(&self) -> PruningMode { + self.db.read().mode.clone() } /// Add a new non-canonical block. @@ -571,18 +628,51 @@ impl StateDb(db: &D) -> Result, Error> { + let meta_key_mode = to_meta_key(PRUNING_MODE, &()); + if let Some(stored_mode) = db.get_meta(&meta_key_mode).map_err(Error::Db)? 
{ + if let Some(mode) = PruningMode::from_id(&stored_mode) { + Ok(Some(mode)) + } else { + Err(StateDbError::Metadata(format!( + "Invalid value stored for PRUNING_MODE: {:02x?}", + stored_mode + )) + .into()) + } + } else { + Ok(None) + } +} + +fn choose_pruning_mode( + stored: PruningMode, + requested: PruningMode, +) -> Result { + match (stored, requested) { + (PruningMode::ArchiveAll, PruningMode::ArchiveAll) => Ok(PruningMode::ArchiveAll), + (PruningMode::ArchiveCanonical, PruningMode::ArchiveCanonical) => + Ok(PruningMode::ArchiveCanonical), + (PruningMode::Constrained(_), PruningMode::Constrained(requested)) => + Ok(PruningMode::Constrained(requested)), + (stored, requested) => Err(StateDbError::IncompatiblePruningModes { requested, stored }), + } +} + #[cfg(test)] mod tests { use crate::{ test::{make_changeset, make_db, TestDb}, - Constraints, PruningMode, StateDb, + Constraints, Error, PruningMode, StateDb, StateDbError, }; use sp_core::H256; use std::io; fn make_test_db(settings: PruningMode) -> (TestDb, StateDb) { let mut db = make_db(&[91, 921, 922, 93, 94]); - let state_db = StateDb::new(settings, false, &db).unwrap(); + let (state_db_init, state_db) = + StateDb::open(&mut db, Some(settings), false, true).unwrap(); + db.commit(&state_db_init); db.commit( &state_db @@ -697,7 +787,9 @@ mod tests { #[test] fn detects_incompatible_mode() { let mut db = make_db(&[]); - let state_db = StateDb::new(PruningMode::ArchiveAll, false, &db).unwrap(); + let (state_db_init, state_db) = + StateDb::open(&mut db, Some(PruningMode::ArchiveAll), false, true).unwrap(); + db.commit(&state_db_init); db.commit( &state_db .insert_block::( @@ -709,7 +801,85 @@ mod tests { .unwrap(), ); let new_mode = PruningMode::Constrained(Constraints { max_blocks: Some(2), max_mem: None }); - let state_db: Result, _> = StateDb::new(new_mode, false, &db); - assert!(state_db.is_err()); + let state_db_open_result: Result<(_, StateDb), _> = + StateDb::open(&mut db, Some(new_mode), false, false); + assert!(state_db_open_result.is_err()); + } + + fn check_stored_and_requested_mode_compatibility( + mode_when_created: Option, + mode_when_reopened: Option, + expected_effective_mode_when_reopenned: Result, + ) { + let mut db = make_db(&[]); + let (state_db_init, state_db) = + StateDb::::open(&mut db, mode_when_created, false, true).unwrap(); + db.commit(&state_db_init); + std::mem::drop(state_db); + + let state_db_reopen_result = + StateDb::::open(&mut db, mode_when_reopened, false, false); + if let Ok(expected_mode) = expected_effective_mode_when_reopenned { + let (state_db_init, state_db_reopened) = state_db_reopen_result.unwrap(); + db.commit(&state_db_init); + assert_eq!(state_db_reopened.pruning_mode(), expected_mode,) + } else { + assert!(matches!( + state_db_reopen_result, + Err(Error::StateDb(StateDbError::IncompatiblePruningModes { .. 
})) + )); + } + } + + #[test] + fn pruning_mode_compatibility() { + for (created, reopened, expected) in [ + (None, None, Ok(PruningMode::keep_blocks(256))), + (None, Some(PruningMode::keep_blocks(256)), Ok(PruningMode::keep_blocks(256))), + (None, Some(PruningMode::keep_blocks(128)), Ok(PruningMode::keep_blocks(128))), + (None, Some(PruningMode::keep_blocks(512)), Ok(PruningMode::keep_blocks(512))), + (None, Some(PruningMode::ArchiveAll), Err(())), + (None, Some(PruningMode::ArchiveCanonical), Err(())), + (Some(PruningMode::keep_blocks(256)), None, Ok(PruningMode::keep_blocks(256))), + ( + Some(PruningMode::keep_blocks(256)), + Some(PruningMode::keep_blocks(256)), + Ok(PruningMode::keep_blocks(256)), + ), + ( + Some(PruningMode::keep_blocks(256)), + Some(PruningMode::keep_blocks(128)), + Ok(PruningMode::keep_blocks(128)), + ), + ( + Some(PruningMode::keep_blocks(256)), + Some(PruningMode::keep_blocks(512)), + Ok(PruningMode::keep_blocks(512)), + ), + (Some(PruningMode::keep_blocks(256)), Some(PruningMode::ArchiveAll), Err(())), + (Some(PruningMode::keep_blocks(256)), Some(PruningMode::ArchiveCanonical), Err(())), + (Some(PruningMode::ArchiveAll), None, Ok(PruningMode::ArchiveAll)), + (Some(PruningMode::ArchiveAll), Some(PruningMode::keep_blocks(256)), Err(())), + (Some(PruningMode::ArchiveAll), Some(PruningMode::keep_blocks(128)), Err(())), + (Some(PruningMode::ArchiveAll), Some(PruningMode::keep_blocks(512)), Err(())), + ( + Some(PruningMode::ArchiveAll), + Some(PruningMode::ArchiveAll), + Ok(PruningMode::ArchiveAll), + ), + (Some(PruningMode::ArchiveAll), Some(PruningMode::ArchiveCanonical), Err(())), + (Some(PruningMode::ArchiveCanonical), None, Ok(PruningMode::ArchiveCanonical)), + (Some(PruningMode::ArchiveCanonical), Some(PruningMode::keep_blocks(256)), Err(())), + (Some(PruningMode::ArchiveCanonical), Some(PruningMode::keep_blocks(128)), Err(())), + (Some(PruningMode::ArchiveCanonical), Some(PruningMode::keep_blocks(512)), Err(())), + (Some(PruningMode::ArchiveCanonical), Some(PruningMode::ArchiveAll), Err(())), + ( + Some(PruningMode::ArchiveCanonical), + Some(PruningMode::ArchiveCanonical), + Ok(PruningMode::ArchiveCanonical), + ), + ] { + check_stored_and_requested_mode_compatibility(created, reopened, expected); + } } } diff --git a/client/state-db/src/noncanonical.rs b/client/state-db/src/noncanonical.rs index 30f2042863010..13cf5825b1b24 100644 --- a/client/state-db/src/noncanonical.rs +++ b/client/state-db/src/noncanonical.rs @@ -22,13 +22,10 @@ //! All pending changes are kept in memory until next call to `apply_pending` or //! `revert_pending` -use super::{to_meta_key, ChangeSet, CommitSet, DBValue, Error, Hash, MetaDb}; +use super::{to_meta_key, ChangeSet, CommitSet, DBValue, Error, Hash, MetaDb, StateDbError}; use codec::{Decode, Encode}; use log::trace; -use std::{ - collections::{hash_map::Entry, HashMap, VecDeque}, - fmt, -}; +use std::collections::{hash_map::Entry, HashMap, VecDeque}; const NON_CANONICAL_JOURNAL: &[u8] = b"noncanonical_journal"; const LAST_CANONICAL: &[u8] = b"last_canonical"; @@ -242,13 +239,13 @@ impl NonCanonicalOverlay { /// Insert a new block into the overlay. If inserted on the second level or lover expects parent /// to be present in the window. 
- pub fn insert( + pub fn insert( &mut self, hash: &BlockHash, number: u64, parent_hash: &BlockHash, changeset: ChangeSet, - ) -> Result, Error> { + ) -> Result, StateDbError> { let mut commit = CommitSet::default(); let front_block_number = self.front_block_number(); if self.levels.is_empty() && self.last_canonicalized.is_none() && number > 0 { @@ -267,7 +264,7 @@ impl NonCanonicalOverlay { front_block_number, front_block_number + self.levels.len() as u64, ); - return Err(Error::InvalidBlockNumber) + return Err(StateDbError::InvalidBlockNumber) } // check for valid parent if inserting on second level or higher if number == front_block_number { @@ -276,10 +273,10 @@ impl NonCanonicalOverlay { .as_ref() .map_or(false, |&(ref h, n)| h == parent_hash && n == number - 1) { - return Err(Error::InvalidParent) + return Err(StateDbError::InvalidParent) } } else if !self.parents.contains_key(parent_hash) { - return Err(Error::InvalidParent) + return Err(StateDbError::InvalidParent) } } let level = if self.levels.is_empty() || @@ -293,10 +290,10 @@ impl NonCanonicalOverlay { }; if level.blocks.len() >= MAX_BLOCKS_PER_LEVEL as usize { - return Err(Error::TooManySiblingBlocks) + return Err(StateDbError::TooManySiblingBlocks) } if level.blocks.iter().any(|b| b.hash == *hash) { - return Err(Error::BlockAlreadyExists) + return Err(StateDbError::BlockAlreadyExists) } let index = level.available_index(); @@ -380,21 +377,21 @@ impl NonCanonicalOverlay { /// Select a top-level root and canonicalized it. Discards all sibling subtrees and the root. /// Returns a set of changes that need to be added to the DB. - pub fn canonicalize( + pub fn canonicalize( &mut self, hash: &BlockHash, commit: &mut CommitSet, - ) -> Result<(), Error> { + ) -> Result<(), StateDbError> { trace!(target: "state-db", "Canonicalizing {:?}", hash); let level = self .levels .get(self.pending_canonicalizations.len()) - .ok_or(Error::InvalidBlock)?; + .ok_or(StateDbError::InvalidBlock)?; let index = level .blocks .iter() .position(|overlay| overlay.hash == *hash) - .ok_or(Error::InvalidBlock)?; + .ok_or(StateDbError::InvalidBlock)?; let mut discarded_journals = Vec::new(); let mut discarded_blocks = Vec::new(); @@ -640,10 +637,9 @@ mod tests { use super::{to_journal_key, NonCanonicalOverlay}; use crate::{ test::{make_changeset, make_db}, - ChangeSet, CommitSet, Error, MetaDb, + ChangeSet, CommitSet, MetaDb, StateDbError, }; use sp_core::H256; - use std::io; fn contains(overlay: &NonCanonicalOverlay, key: u64) -> bool { overlay.get(&H256::from_low_u64_be(key)) == @@ -665,7 +661,7 @@ mod tests { let db = make_db(&[]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); let mut commit = CommitSet::default(); - overlay.canonicalize::(&H256::default(), &mut commit).unwrap(); + overlay.canonicalize(&H256::default(), &mut commit).unwrap(); } #[test] @@ -675,10 +671,8 @@ mod tests { let h1 = H256::random(); let h2 = H256::random(); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay - .insert::(&h1, 2, &H256::default(), ChangeSet::default()) - .unwrap(); - overlay.insert::(&h2, 1, &h1, ChangeSet::default()).unwrap(); + overlay.insert(&h1, 2, &H256::default(), ChangeSet::default()).unwrap(); + overlay.insert(&h2, 1, &h1, ChangeSet::default()).unwrap(); } #[test] @@ -688,10 +682,8 @@ mod tests { let h2 = H256::random(); let db = make_db(&[]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay - .insert::(&h1, 1, &H256::default(), ChangeSet::default()) - .unwrap(); - overlay.insert::(&h2, 3, &h1, 
ChangeSet::default()).unwrap(); + overlay.insert(&h1, 1, &H256::default(), ChangeSet::default()).unwrap(); + overlay.insert(&h2, 3, &h1, ChangeSet::default()).unwrap(); } #[test] @@ -701,12 +693,8 @@ mod tests { let h1 = H256::random(); let h2 = H256::random(); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay - .insert::(&h1, 1, &H256::default(), ChangeSet::default()) - .unwrap(); - overlay - .insert::(&h2, 2, &H256::default(), ChangeSet::default()) - .unwrap(); + overlay.insert(&h1, 1, &H256::default(), ChangeSet::default()).unwrap(); + overlay.insert(&h2, 2, &H256::default(), ChangeSet::default()).unwrap(); } #[test] @@ -714,12 +702,10 @@ mod tests { let db = make_db(&[]); let h1 = H256::random(); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay - .insert::(&h1, 2, &H256::default(), ChangeSet::default()) - .unwrap(); + overlay.insert(&h1, 2, &H256::default(), ChangeSet::default()).unwrap(); assert!(matches!( - overlay.insert::(&h1, 2, &H256::default(), ChangeSet::default()), - Err(Error::BlockAlreadyExists) + overlay.insert(&h1, 2, &H256::default(), ChangeSet::default()), + Err(StateDbError::BlockAlreadyExists) )); } @@ -730,11 +716,9 @@ mod tests { let h2 = H256::random(); let db = make_db(&[]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay - .insert::(&h1, 1, &H256::default(), ChangeSet::default()) - .unwrap(); + overlay.insert(&h1, 1, &H256::default(), ChangeSet::default()).unwrap(); let mut commit = CommitSet::default(); - overlay.canonicalize::(&h2, &mut commit).unwrap(); + overlay.canonicalize(&h2, &mut commit).unwrap(); } #[test] @@ -743,16 +727,14 @@ mod tests { let mut db = make_db(&[1, 2]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); let changeset = make_changeset(&[3, 4], &[2]); - let insertion = overlay - .insert::(&h1, 1, &H256::default(), changeset.clone()) - .unwrap(); + let insertion = overlay.insert(&h1, 1, &H256::default(), changeset.clone()).unwrap(); assert_eq!(insertion.data.inserted.len(), 0); assert_eq!(insertion.data.deleted.len(), 0); assert_eq!(insertion.meta.inserted.len(), 2); assert_eq!(insertion.meta.deleted.len(), 0); db.commit(&insertion); let mut finalization = CommitSet::default(); - overlay.canonicalize::(&h1, &mut finalization).unwrap(); + overlay.canonicalize(&h1, &mut finalization).unwrap(); assert_eq!(finalization.data.inserted.len(), changeset.inserted.len()); assert_eq!(finalization.data.deleted.len(), changeset.deleted.len()); assert_eq!(finalization.meta.inserted.len(), 1); @@ -769,10 +751,10 @@ mod tests { let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); db.commit( &overlay - .insert::(&h1, 10, &H256::default(), make_changeset(&[3, 4], &[2])) + .insert(&h1, 10, &H256::default(), make_changeset(&[3, 4], &[2])) .unwrap(), ); - db.commit(&overlay.insert::(&h2, 11, &h1, make_changeset(&[5], &[3])).unwrap()); + db.commit(&overlay.insert(&h2, 11, &h1, make_changeset(&[5], &[3])).unwrap()); assert_eq!(db.meta.len(), 3); let overlay2 = NonCanonicalOverlay::::new(&db).unwrap(); @@ -789,12 +771,12 @@ mod tests { let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); db.commit( &overlay - .insert::(&h1, 10, &H256::default(), make_changeset(&[3, 4], &[2])) + .insert(&h1, 10, &H256::default(), make_changeset(&[3, 4], &[2])) .unwrap(), ); - db.commit(&overlay.insert::(&h2, 11, &h1, make_changeset(&[5], &[3])).unwrap()); + db.commit(&overlay.insert(&h2, 11, &h1, make_changeset(&[5], &[3])).unwrap()); let mut commit = CommitSet::default(); - 
overlay.canonicalize::(&h1, &mut commit).unwrap(); + overlay.canonicalize(&h1, &mut commit).unwrap(); db.commit(&commit); overlay.apply_pending(); assert_eq!(overlay.levels.len(), 1); @@ -813,15 +795,15 @@ mod tests { let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); let changeset1 = make_changeset(&[5, 6], &[2]); let changeset2 = make_changeset(&[7, 8], &[5, 3]); - db.commit(&overlay.insert::(&h1, 1, &H256::default(), changeset1).unwrap()); + db.commit(&overlay.insert(&h1, 1, &H256::default(), changeset1).unwrap()); assert!(contains(&overlay, 5)); - db.commit(&overlay.insert::(&h2, 2, &h1, changeset2).unwrap()); + db.commit(&overlay.insert(&h2, 2, &h1, changeset2).unwrap()); assert!(contains(&overlay, 7)); assert!(contains(&overlay, 5)); assert_eq!(overlay.levels.len(), 2); assert_eq!(overlay.parents.len(), 2); let mut commit = CommitSet::default(); - overlay.canonicalize::(&h1, &mut commit).unwrap(); + overlay.canonicalize(&h1, &mut commit).unwrap(); db.commit(&commit); assert!(contains(&overlay, 5)); assert_eq!(overlay.levels.len(), 2); @@ -832,7 +814,7 @@ mod tests { assert!(!contains(&overlay, 5)); assert!(contains(&overlay, 7)); let mut commit = CommitSet::default(); - overlay.canonicalize::(&h2, &mut commit).unwrap(); + overlay.canonicalize(&h2, &mut commit).unwrap(); db.commit(&commit); overlay.apply_pending(); assert_eq!(overlay.levels.len(), 0); @@ -847,11 +829,11 @@ mod tests { let (h_2, c_2) = (H256::random(), make_changeset(&[1], &[])); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - db.commit(&overlay.insert::(&h_1, 1, &H256::default(), c_1).unwrap()); - db.commit(&overlay.insert::(&h_2, 1, &H256::default(), c_2).unwrap()); + db.commit(&overlay.insert(&h_1, 1, &H256::default(), c_1).unwrap()); + db.commit(&overlay.insert(&h_2, 1, &H256::default(), c_2).unwrap()); assert!(contains(&overlay, 1)); let mut commit = CommitSet::default(); - overlay.canonicalize::(&h_1, &mut commit).unwrap(); + overlay.canonicalize(&h_1, &mut commit).unwrap(); db.commit(&commit); assert!(contains(&overlay, 1)); overlay.apply_pending(); @@ -866,18 +848,14 @@ mod tests { let mut db = make_db(&[]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); let changeset = make_changeset(&[], &[]); - db.commit( - &overlay - .insert::(&h1, 1, &H256::default(), changeset.clone()) - .unwrap(), - ); - db.commit(&overlay.insert::(&h2, 2, &h1, changeset.clone()).unwrap()); + db.commit(&overlay.insert(&h1, 1, &H256::default(), changeset.clone()).unwrap()); + db.commit(&overlay.insert(&h2, 2, &h1, changeset.clone()).unwrap()); overlay.apply_pending(); let mut commit = CommitSet::default(); - overlay.canonicalize::(&h1, &mut commit).unwrap(); - overlay.canonicalize::(&h2, &mut commit).unwrap(); + overlay.canonicalize(&h1, &mut commit).unwrap(); + overlay.canonicalize(&h2, &mut commit).unwrap(); db.commit(&commit); - db.commit(&overlay.insert::(&h3, 3, &h2, changeset.clone()).unwrap()); + db.commit(&overlay.insert(&h3, 3, &h2, changeset.clone()).unwrap()); overlay.apply_pending(); assert_eq!(overlay.levels.len(), 1); } @@ -912,21 +890,21 @@ mod tests { let (h_2_1_1, c_2_1_1) = (H256::random(), make_changeset(&[211], &[])); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - db.commit(&overlay.insert::(&h_1, 1, &H256::default(), c_1).unwrap()); + db.commit(&overlay.insert(&h_1, 1, &H256::default(), c_1).unwrap()); - db.commit(&overlay.insert::(&h_1_1, 2, &h_1, c_1_1).unwrap()); - db.commit(&overlay.insert::(&h_1_2, 2, &h_1, c_1_2).unwrap()); + db.commit(&overlay.insert(&h_1_1, 2, 
+		db.commit(&overlay.insert(&h_1_2, 2, &h_1, c_1_2).unwrap());
 
-		db.commit(&overlay.insert::<io::Error>(&h_2, 1, &H256::default(), c_2).unwrap());
+		db.commit(&overlay.insert(&h_2, 1, &H256::default(), c_2).unwrap());
 
-		db.commit(&overlay.insert::<io::Error>(&h_2_1, 2, &h_2, c_2_1).unwrap());
-		db.commit(&overlay.insert::<io::Error>(&h_2_2, 2, &h_2, c_2_2).unwrap());
+		db.commit(&overlay.insert(&h_2_1, 2, &h_2, c_2_1).unwrap());
+		db.commit(&overlay.insert(&h_2_2, 2, &h_2, c_2_2).unwrap());
 
-		db.commit(&overlay.insert::<io::Error>(&h_1_1_1, 3, &h_1_1, c_1_1_1).unwrap());
-		db.commit(&overlay.insert::<io::Error>(&h_1_2_1, 3, &h_1_2, c_1_2_1).unwrap());
-		db.commit(&overlay.insert::<io::Error>(&h_1_2_2, 3, &h_1_2, c_1_2_2).unwrap());
-		db.commit(&overlay.insert::<io::Error>(&h_1_2_3, 3, &h_1_2, c_1_2_3).unwrap());
-		db.commit(&overlay.insert::<io::Error>(&h_2_1_1, 3, &h_2_1, c_2_1_1).unwrap());
+		db.commit(&overlay.insert(&h_1_1_1, 3, &h_1_1, c_1_1_1).unwrap());
+		db.commit(&overlay.insert(&h_1_2_1, 3, &h_1_2, c_1_2_1).unwrap());
+		db.commit(&overlay.insert(&h_1_2_2, 3, &h_1_2, c_1_2_2).unwrap());
+		db.commit(&overlay.insert(&h_1_2_3, 3, &h_1_2, c_1_2_3).unwrap());
+		db.commit(&overlay.insert(&h_2_1_1, 3, &h_2_1, c_2_1_1).unwrap());
 
 		assert!(contains(&overlay, 2));
 		assert!(contains(&overlay, 11));
@@ -946,7 +924,7 @@ mod tests {
 
 		// canonicalize 1. 2 and all its children should be discarded
 		let mut commit = CommitSet::default();
-		overlay.canonicalize::<io::Error>(&h_1, &mut commit).unwrap();
+		overlay.canonicalize(&h_1, &mut commit).unwrap();
 		db.commit(&commit);
 		overlay.apply_pending();
 		assert_eq!(overlay.levels.len(), 2);
@@ -967,7 +945,7 @@ mod tests {
 
 		// canonicalize 1_2. 1_1 and all its children should be discarded
 		let mut commit = CommitSet::default();
-		overlay.canonicalize::<io::Error>(&h_1_2, &mut commit).unwrap();
+		overlay.canonicalize(&h_1_2, &mut commit).unwrap();
 		db.commit(&commit);
 		overlay.apply_pending();
 		assert_eq!(overlay.levels.len(), 1);
@@ -984,7 +962,7 @@ mod tests {
 
 		// canonicalize 1_2_2
 		let mut commit = CommitSet::default();
-		overlay.canonicalize::<io::Error>(&h_1_2_2, &mut commit).unwrap();
+		overlay.canonicalize(&h_1_2_2, &mut commit).unwrap();
 		db.commit(&commit);
 		overlay.apply_pending();
 		assert_eq!(overlay.levels.len(), 0);
@@ -1002,8 +980,8 @@ mod tests {
 		assert!(overlay.revert_one().is_none());
 		let changeset1 = make_changeset(&[5, 6], &[2]);
 		let changeset2 = make_changeset(&[7, 8], &[5, 3]);
-		db.commit(&overlay.insert::<io::Error>(&h1, 1, &H256::default(), changeset1).unwrap());
-		db.commit(&overlay.insert::<io::Error>(&h2, 2, &h1, changeset2).unwrap());
+		db.commit(&overlay.insert(&h1, 1, &H256::default(), changeset1).unwrap());
+		db.commit(&overlay.insert(&h2, 2, &h1, changeset2).unwrap());
 		assert!(contains(&overlay, 7));
 		db.commit(&overlay.revert_one().unwrap());
 		assert_eq!(overlay.parents.len(), 1);
@@ -1025,10 +1003,10 @@ mod tests {
 		let changeset1 = make_changeset(&[5, 6], &[2]);
 		let changeset2 = make_changeset(&[7, 8], &[5, 3]);
 		let changeset3 = make_changeset(&[9], &[]);
-		overlay.insert::<io::Error>(&h1, 1, &H256::default(), changeset1).unwrap();
+		overlay.insert(&h1, 1, &H256::default(), changeset1).unwrap();
 		assert!(contains(&overlay, 5));
-		overlay.insert::<io::Error>(&h2_1, 2, &h1, changeset2).unwrap();
-		overlay.insert::<io::Error>(&h2_2, 2, &h1, changeset3).unwrap();
+		overlay.insert(&h2_1, 2, &h1, changeset2).unwrap();
+		overlay.insert(&h2_2, 2, &h1, changeset3).unwrap();
 		assert!(contains(&overlay, 7));
 		assert!(contains(&overlay, 5));
 		assert!(contains(&overlay, 9));
@@ -1052,14 +1030,14 @@ mod tests {
 		let (h_2, c_2) = (H256::random(), make_changeset(&[2], &[]));
 
 		let mut overlay = NonCanonicalOverlay::<H256, H256>::new(&db).unwrap();
-		db.commit(&overlay.insert::<io::Error>(&h_1, 1, &H256::default(), c_1).unwrap());
-		db.commit(&overlay.insert::<io::Error>(&h_2, 1, &H256::default(), c_2).unwrap());
+		db.commit(&overlay.insert(&h_1, 1, &H256::default(), c_1).unwrap());
+		db.commit(&overlay.insert(&h_2, 1, &H256::default(), c_2).unwrap());
 		overlay.apply_pending();
 
 		overlay.pin(&h_1);
 
 		let mut commit = CommitSet::default();
-		overlay.canonicalize::<io::Error>(&h_2, &mut commit).unwrap();
+		overlay.canonicalize(&h_2, &mut commit).unwrap();
 		db.commit(&commit);
 		overlay.apply_pending();
 		assert!(contains(&overlay, 1));
@@ -1082,15 +1060,15 @@ mod tests {
 		let (h_3, c_3) = (H256::random(), make_changeset(&[], &[]));
 
 		let mut overlay = NonCanonicalOverlay::<H256, H256>::new(&db).unwrap();
-		db.commit(&overlay.insert::<io::Error>(&h_1, 1, &H256::default(), c_1).unwrap());
-		db.commit(&overlay.insert::<io::Error>(&h_2, 1, &H256::default(), c_2).unwrap());
-		db.commit(&overlay.insert::<io::Error>(&h_3, 1, &H256::default(), c_3).unwrap());
+		db.commit(&overlay.insert(&h_1, 1, &H256::default(), c_1).unwrap());
+		db.commit(&overlay.insert(&h_2, 1, &H256::default(), c_2).unwrap());
+		db.commit(&overlay.insert(&h_3, 1, &H256::default(), c_3).unwrap());
 		overlay.apply_pending();
 
 		overlay.pin(&h_1);
 
 		let mut commit = CommitSet::default();
-		overlay.canonicalize::<io::Error>(&h_3, &mut commit).unwrap();
+		overlay.canonicalize(&h_3, &mut commit).unwrap();
 		db.commit(&commit);
 		overlay.apply_pending();
 		// 1_2 should be discarded, 1_1 is pinned
@@ -1112,15 +1090,15 @@ mod tests {
 		let (h_21, c_21) = (H256::random(), make_changeset(&[], &[]));
 
 		let mut overlay = NonCanonicalOverlay::<H256, H256>::new(&db).unwrap();
-		db.commit(&overlay.insert::<io::Error>(&h_11, 1, &H256::default(), c_11).unwrap());
-		db.commit(&overlay.insert::<io::Error>(&h_12, 1, &H256::default(), c_12).unwrap());
-		db.commit(&overlay.insert::<io::Error>(&h_21, 2, &h_11, c_21).unwrap());
+		db.commit(&overlay.insert(&h_11, 1, &H256::default(), c_11).unwrap());
+		db.commit(&overlay.insert(&h_12, 1, &H256::default(), c_12).unwrap());
+		db.commit(&overlay.insert(&h_21, 2, &h_11, c_21).unwrap());
 		overlay.apply_pending();
 
 		overlay.pin(&h_21);
 
 		let mut commit = CommitSet::default();
-		overlay.canonicalize::<io::Error>(&h_12, &mut commit).unwrap();
+		overlay.canonicalize(&h_12, &mut commit).unwrap();
 		db.commit(&commit);
 		overlay.apply_pending();
 		// 1_1 and 2_1 should be both pinned
@@ -1141,18 +1119,14 @@ mod tests {
 		let h21 = H256::random();
 		let mut db = make_db(&[]);
 		let mut overlay = NonCanonicalOverlay::<H256, H256>::new(&db).unwrap();
-		db.commit(
-			&overlay
-				.insert::<io::Error>(&root, 10, &H256::default(), make_changeset(&[], &[]))
-				.unwrap(),
-		);
-		db.commit(&overlay.insert::<io::Error>(&h1, 11, &root, make_changeset(&[1], &[])).unwrap());
-		db.commit(&overlay.insert::<io::Error>(&h2, 11, &root, make_changeset(&[2], &[])).unwrap());
-		db.commit(&overlay.insert::<io::Error>(&h11, 12, &h1, make_changeset(&[11], &[])).unwrap());
-		db.commit(&overlay.insert::<io::Error>(&h21, 12, &h2, make_changeset(&[21], &[])).unwrap());
+		db.commit(&overlay.insert(&root, 10, &H256::default(), make_changeset(&[], &[])).unwrap());
+		db.commit(&overlay.insert(&h1, 11, &root, make_changeset(&[1], &[])).unwrap());
+		db.commit(&overlay.insert(&h2, 11, &root, make_changeset(&[2], &[])).unwrap());
+		db.commit(&overlay.insert(&h11, 12, &h1, make_changeset(&[11], &[])).unwrap());
+		db.commit(&overlay.insert(&h21, 12, &h2, make_changeset(&[21], &[])).unwrap());
 		let mut commit = CommitSet::default();
-		overlay.canonicalize::<io::Error>(&root, &mut commit).unwrap();
-		overlay.canonicalize::<io::Error>(&h2, &mut commit).unwrap(); // h11 should stay in the DB
+		overlay.canonicalize(&root, &mut commit).unwrap();
+		overlay.canonicalize(&h2, &mut commit).unwrap(); // h11 should stay in the DB
 		db.commit(&commit);
 		overlay.apply_pending();
 		assert_eq!(overlay.levels.len(), 1);
@@ -1166,7 +1140,7 @@ mod tests {
 		assert!(contains(&overlay, 21));
 
 		let mut commit = CommitSet::default();
-		overlay.canonicalize::<io::Error>(&h21, &mut commit).unwrap(); // h11 should stay in the DB
+		overlay.canonicalize(&h21, &mut commit).unwrap(); // h11 should stay in the DB
 		db.commit(&commit);
 		overlay.apply_pending();
 		assert!(!contains(&overlay, 21));
@@ -1183,25 +1157,21 @@ mod tests {
 		let h21 = H256::random();
 		let mut db = make_db(&[]);
 		let mut overlay = NonCanonicalOverlay::<H256, H256>::new(&db).unwrap();
-		db.commit(
-			&overlay
-				.insert::<io::Error>(&root, 10, &H256::default(), make_changeset(&[], &[]))
-				.unwrap(),
-		);
-		db.commit(&overlay.insert::<io::Error>(&h1, 11, &root, make_changeset(&[1], &[])).unwrap());
-		db.commit(&overlay.insert::<io::Error>(&h2, 11, &root, make_changeset(&[2], &[])).unwrap());
-		db.commit(&overlay.insert::<io::Error>(&h11, 12, &h1, make_changeset(&[11], &[])).unwrap());
-		db.commit(&overlay.insert::<io::Error>(&h21, 12, &h2, make_changeset(&[21], &[])).unwrap());
+		db.commit(&overlay.insert(&root, 10, &H256::default(), make_changeset(&[], &[])).unwrap());
+		db.commit(&overlay.insert(&h1, 11, &root, make_changeset(&[1], &[])).unwrap());
+		db.commit(&overlay.insert(&h2, 11, &root, make_changeset(&[2], &[])).unwrap());
+		db.commit(&overlay.insert(&h11, 12, &h1, make_changeset(&[11], &[])).unwrap());
+		db.commit(&overlay.insert(&h21, 12, &h2, make_changeset(&[21], &[])).unwrap());
 		let mut commit = CommitSet::default();
-		overlay.canonicalize::<io::Error>(&root, &mut commit).unwrap();
-		overlay.canonicalize::<io::Error>(&h2, &mut commit).unwrap(); // h11 should stay in the DB
+		overlay.canonicalize(&root, &mut commit).unwrap();
+		overlay.canonicalize(&h2, &mut commit).unwrap(); // h11 should stay in the DB
 		db.commit(&commit);
 		overlay.apply_pending();
 
 		// add another block at top level. It should reuse journal index 0 of previously discarded
 		// block
 		let h22 = H256::random();
-		db.commit(&overlay.insert::<io::Error>(&h22, 12, &h2, make_changeset(&[22], &[])).unwrap());
+		db.commit(&overlay.insert(&h22, 12, &h2, make_changeset(&[22], &[])).unwrap());
 
 		assert_eq!(overlay.levels[0].blocks[0].journal_index, 1);
 		assert_eq!(overlay.levels[0].blocks[1].journal_index, 0);
@@ -1221,15 +1191,11 @@ mod tests {
 		let h21 = H256::random();
 		let mut db = make_db(&[]);
 		let mut overlay = NonCanonicalOverlay::<H256, H256>::new(&db).unwrap();
-		db.commit(
-			&overlay
-				.insert::<io::Error>(&root, 10, &H256::default(), make_changeset(&[], &[]))
-				.unwrap(),
-		);
-		db.commit(&overlay.insert::<io::Error>(&h1, 11, &root, make_changeset(&[1], &[])).unwrap());
-		db.commit(&overlay.insert::<io::Error>(&h2, 11, &root, make_changeset(&[2], &[])).unwrap());
-		db.commit(&overlay.insert::<io::Error>(&h11, 12, &h1, make_changeset(&[11], &[])).unwrap());
-		db.commit(&overlay.insert::<io::Error>(&h21, 12, &h2, make_changeset(&[21], &[])).unwrap());
+		db.commit(&overlay.insert(&root, 10, &H256::default(), make_changeset(&[], &[])).unwrap());
+		db.commit(&overlay.insert(&h1, 11, &root, make_changeset(&[1], &[])).unwrap());
+		db.commit(&overlay.insert(&h2, 11, &root, make_changeset(&[2], &[])).unwrap());
+		db.commit(&overlay.insert(&h11, 12, &h1, make_changeset(&[11], &[])).unwrap());
+		db.commit(&overlay.insert(&h21, 12, &h2, make_changeset(&[21], &[])).unwrap());
 		assert!(overlay.remove(&h1).is_none());
 		assert!(overlay.remove(&h2).is_none());
 		assert_eq!(overlay.levels.len(), 3);