diff --git a/Cargo.lock b/Cargo.lock index 7b1a05b7067e2..29d15b9e015cb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10070,6 +10070,7 @@ dependencies = [ name = "sp-state-machine" version = "0.12.0" dependencies = [ + "assert_matches", "hash-db", "hex-literal", "log", diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index 8bc4a0f4893c7..d3d43e742d026 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -393,10 +393,11 @@ impl StateBackend> for BenchmarkingState { &self, child_info: Option<&ChildInfo>, prefix: Option<&[u8]>, + start_at: Option<&[u8]>, f: F, ) { if let Some(ref state) = *self.state.borrow() { - state.apply_to_keys_while(child_info, prefix, f) + state.apply_to_keys_while(child_info, prefix, start_at, f) } } diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index a32a666c3c980..5f15b2d6fe969 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -226,9 +226,10 @@ impl StateBackend> for RefTrackingState { &self, child_info: Option<&ChildInfo>, prefix: Option<&[u8]>, + start_at: Option<&[u8]>, f: F, ) { - self.state.apply_to_keys_while(child_info, prefix, f) + self.state.apply_to_keys_while(child_info, prefix, start_at, f) } fn for_child_keys_with_prefix( diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index 9dada92b066ea..8326946999946 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -639,9 +639,10 @@ impl>, B: BlockT> StateBackend> for Cachin &self, child_info: Option<&ChildInfo>, prefix: Option<&[u8]>, + start_at: Option<&[u8]>, f: F, ) { - self.state.apply_to_keys_while(child_info, prefix, f) + self.state.apply_to_keys_while(child_info, prefix, start_at, f) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { @@ -839,9 +840,10 @@ impl>, B: BlockT> StateBackend> &self, child_info: Option<&ChildInfo>, prefix: Option<&[u8]>, + start_at: Option<&[u8]>, f: F, ) { - self.caching_state().apply_to_keys_while(child_info, prefix, f) + 
self.caching_state().apply_to_keys_while(child_info, prefix, start_at, f) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { diff --git a/frame/bags-list/src/list/mod.rs b/frame/bags-list/src/list/mod.rs index d95c5de8132ea..b4f852685842d 100644 --- a/frame/bags-list/src/list/mod.rs +++ b/frame/bags-list/src/list/mod.rs @@ -94,7 +94,9 @@ impl, I: 'static> List { /// this function should generally not be used in production as it could lead to a very large /// number of storage accesses. pub(crate) fn unsafe_clear() { + #[allow(deprecated)] crate::ListBags::::remove_all(None); + #[allow(deprecated)] crate::ListNodes::::remove_all(); } diff --git a/frame/contracts/src/migration.rs b/frame/contracts/src/migration.rs index 0832ebadac9c6..19e699a855461 100644 --- a/frame/contracts/src/migration.rs +++ b/frame/contracts/src/migration.rs @@ -56,6 +56,7 @@ mod v4 { use super::*; pub fn migrate() -> Weight { + #[allow(deprecated)] migration::remove_storage_prefix(>::name().as_bytes(), b"CurrentSchedule", b""); T::DbWeight::get().writes(1) } diff --git a/frame/contracts/src/storage.rs b/frame/contracts/src/storage.rs index af6dbe3c62bfc..e73eb8d3bd55e 100644 --- a/frame/contracts/src/storage.rs +++ b/frame/contracts/src/storage.rs @@ -27,12 +27,12 @@ use crate::{ use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ dispatch::{DispatchError, DispatchResult}, - storage::child::{self, ChildInfo, KillStorageResult}, + storage::child::{self, ChildInfo}, weights::Weight, }; use scale_info::TypeInfo; use sp_core::crypto::UncheckedFrom; -use sp_io::hashing::blake2_256; +use sp_io::{hashing::blake2_256, KillStorageResult}; use sp_runtime::{ traits::{Hash, Zero}, RuntimeDebug, @@ -266,16 +266,16 @@ where while !queue.is_empty() && remaining_key_budget > 0 { // Cannot panic due to loop condition let trie = &mut queue[0]; - let outcome = - child::kill_storage(&child_trie_info(&trie.trie_id), Some(remaining_key_budget)); + #[allow(deprecated)] + let outcome = 
child::kill_storage(&child_trie_info(&trie.trie_id), Some(remaining_key_budget)); let keys_removed = match outcome { // This happens when our budget wasn't large enough to remove all keys. - KillStorageResult::SomeRemaining(count) => count, - KillStorageResult::AllRemoved(count) => { + KillStorageResult::SomeRemaining(c) => c, + KillStorageResult::AllRemoved(c) => { // We do not care to preserve order. The contract is deleted already and // no one waits for the trie to be deleted. queue.swap_remove(0); - count + c }, }; remaining_key_budget = remaining_key_budget.saturating_sub(keys_removed); diff --git a/frame/elections-phragmen/src/benchmarking.rs b/frame/elections-phragmen/src/benchmarking.rs index 493f20924203c..44dd6aff09f0c 100644 --- a/frame/elections-phragmen/src/benchmarking.rs +++ b/frame/elections-phragmen/src/benchmarking.rs @@ -148,6 +148,7 @@ fn clean() { >::kill(); >::kill(); >::kill(); + #[allow(deprecated)] >::remove_all(None); } diff --git a/frame/im-online/src/lib.rs b/frame/im-online/src/lib.rs index 05b7618b286a6..f190f6672f309 100644 --- a/frame/im-online/src/lib.rs +++ b/frame/im-online/src/lib.rs @@ -900,7 +900,9 @@ impl OneSessionHandler for Pallet { // Remove all received heartbeats and number of authored blocks from the // current session, they have already been processed and won't be needed // anymore. 
+ #[allow(deprecated)] ReceivedHeartbeats::::remove_prefix(&T::ValidatorSet::session_index(), None); + #[allow(deprecated)] AuthoredBlocks::::remove_prefix(&T::ValidatorSet::session_index(), None); if offenders.is_empty() { diff --git a/frame/scheduler/src/lib.rs b/frame/scheduler/src/lib.rs index 9b0fc87587e3a..a005c051a1abc 100644 --- a/frame/scheduler/src/lib.rs +++ b/frame/scheduler/src/lib.rs @@ -565,6 +565,7 @@ impl Pallet { }, ); + #[allow(deprecated)] frame_support::storage::migration::remove_storage_prefix( Self::name().as_bytes(), b"StorageVersion", @@ -601,6 +602,7 @@ impl Pallet { ) }); + #[allow(deprecated)] frame_support::storage::migration::remove_storage_prefix( Self::name().as_bytes(), b"StorageVersion", diff --git a/frame/society/src/lib.rs b/frame/society/src/lib.rs index 2973ecd68092e..5a993f72f32d2 100644 --- a/frame/society/src/lib.rs +++ b/frame/society/src/lib.rs @@ -1067,6 +1067,7 @@ pub mod pallet { Founder::::kill(); Rules::::kill(); Candidates::::kill(); + #[allow(deprecated)] SuspendedCandidates::::remove_all(None); Self::deposit_event(Event::::Unfounded { founder }); Ok(()) @@ -1511,6 +1512,7 @@ impl, I: 'static> Pallet { .collect::>(); // Clean up all votes. + #[allow(deprecated)] >::remove_all(None); // Reward one of the voters who voted the right way. @@ -1695,6 +1697,7 @@ impl, I: 'static> Pallet { } // Clean up all votes. + #[allow(deprecated)] >::remove_all(None); } diff --git a/frame/staking/src/pallet/impls.rs b/frame/staking/src/pallet/impls.rs index 4de1ea36cb591..66265ab1f135e 100644 --- a/frame/staking/src/pallet/impls.rs +++ b/frame/staking/src/pallet/impls.rs @@ -585,8 +585,11 @@ impl Pallet { /// Clear all era information for given era. 
pub(crate) fn clear_era_information(era_index: EraIndex) { + #[allow(deprecated)] >::remove_prefix(era_index, None); + #[allow(deprecated)] >::remove_prefix(era_index, None); + #[allow(deprecated)] >::remove_prefix(era_index, None); >::remove(era_index); >::remove(era_index); @@ -984,9 +987,13 @@ impl ElectionDataProvider for Pallet { #[cfg(feature = "runtime-benchmarks")] fn clear() { + #[allow(deprecated)] >::remove_all(None); + #[allow(deprecated)] >::remove_all(None); + #[allow(deprecated)] >::remove_all(); + #[allow(deprecated)] >::remove_all(); T::VoterList::unsafe_clear(); @@ -1368,7 +1375,9 @@ impl SortedListProvider for UseNominatorsAndValidatorsM fn unsafe_clear() { // NOTE: Caller must ensure this doesn't lead to too many storage accesses. This is a // condition of SortedListProvider::unsafe_clear. + #[allow(deprecated)] Nominators::::remove_all(); + #[allow(deprecated)] Validators::::remove_all(); } } diff --git a/frame/staking/src/slashing.rs b/frame/staking/src/slashing.rs index 25954a3699b31..7372c4390f816 100644 --- a/frame/staking/src/slashing.rs +++ b/frame/staking/src/slashing.rs @@ -557,7 +557,9 @@ impl<'a, T: 'a + Config> Drop for InspectingSpans<'a, T> { /// Clear slashing metadata for an obsolete era. pub(crate) fn clear_era_metadata(obsolete_era: EraIndex) { + #[allow(deprecated)] as Store>::ValidatorSlashInEra::remove_prefix(&obsolete_era, None); + #[allow(deprecated)] as Store>::NominatorSlashInEra::remove_prefix(&obsolete_era, None); } diff --git a/frame/staking/src/testing_utils.rs b/frame/staking/src/testing_utils.rs index f30020597db45..ba67292ddc434 100644 --- a/frame/staking/src/testing_utils.rs +++ b/frame/staking/src/testing_utils.rs @@ -36,9 +36,11 @@ const SEED: u32 = 0; /// This function removes all validators and nominators from storage. pub fn clear_validators_and_nominators() { + #[allow(deprecated)] Validators::::remove_all(); // whenever we touch nominators counter we should update `T::VoterList` as well. 
+ #[allow(deprecated)] Nominators::::remove_all(); // NOTE: safe to call outside block production diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index 03f1553293a47..178fddb61b0a2 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -1097,8 +1097,12 @@ pub mod tests { DoubleMap::insert(&(key1 + 1), &key2, &4u64); DoubleMap::insert(&(key1 + 1), &(key2 + 1), &4u64); assert!(matches!( - DoubleMap::remove_prefix(&key1, None), - sp_io::KillStorageResult::AllRemoved(0), // all in overlay + DoubleMap::clear_prefix(&key1, u32::max_value(), None), + // Note this is the incorrect answer (for now), since we are using v2 of + // `clear_prefix`. + // When we switch to v3, then this will become: + // sp_io::MultiRemovalResults::NoneLeft { db: 0, total: 2 }, + sp_io::MultiRemovalResults { maybe_cursor: None, backend: 0, unique: 0, loops: 0 }, )); assert_eq!(DoubleMap::get(&key1, &key2), 0u64); assert_eq!(DoubleMap::get(&key1, &(key2 + 1)), 0u64); diff --git a/frame/support/src/storage/child.rs b/frame/support/src/storage/child.rs index e20d4fe0cd4d3..397daaa82a677 100644 --- a/frame/support/src/storage/child.rs +++ b/frame/support/src/storage/child.rs @@ -21,7 +21,7 @@ // NOTE: could replace unhashed by having only one kind of storage (top trie being the child info // of null length parent storage key). -pub use crate::sp_io::KillStorageResult; +pub use crate::sp_io::{KillStorageResult, MultiRemovalResults}; use crate::sp_std::prelude::*; use codec::{Codec, Decode, Encode}; pub use sp_core::storage::{ChildInfo, ChildType, StateVersion}; @@ -136,6 +136,7 @@ pub fn exists(child_info: &ChildInfo, key: &[u8]) -> bool { /// not make much sense because it is not cumulative when called inside the same block. /// Use this function to distribute the deletion of a single child trie across multiple /// blocks. 
+#[deprecated = "Use `clear_storage` instead"] pub fn kill_storage(child_info: &ChildInfo, limit: Option) -> KillStorageResult { match child_info.child_type() { ChildType::ParentKeyId => @@ -143,6 +144,58 @@ pub fn kill_storage(child_info: &ChildInfo, limit: Option) -> KillStorageRe } } +/// Partially clear the child storage of each key-value pair. +/// +/// # Limit +/// +/// A *limit* should always be provided through `maybe_limit`. This is one fewer than the +/// maximum number of backend iterations which may be done by this operation and as such +/// represents the maximum number of backend deletions which may happen. A *limit* of zero +/// implies that no keys will be deleted, though there may be a single iteration done. +/// +/// The limit can be used to partially delete storage items in case it is too large or costly +/// to delete all in a single operation. +/// +/// # Cursor +/// +/// A *cursor* may be passed in to this operation with `maybe_cursor`. `None` should only be +/// passed once (in the initial call) for any attempt to clear storage. In general, subsequent calls +/// operating on the same prefix should pass `Some` and this value should be equal to the +/// previous call result's `maybe_cursor` field. The only exception to this is when you can +/// guarantee that the subsequent call is in a new block; in this case the previous call's result +/// cursor need not be passed in and a `None` may be passed instead. This exception may be useful +/// when making this call solely from a block-hook such as `on_initialize`. +/// +/// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once the +/// resultant `maybe_cursor` field is `None`, then no further items remain to be deleted. +/// +/// NOTE: After the initial call for any given child storage, it is important that no further +/// keys are inserted. If so, then they may or may not be deleted by subsequent calls. 
+/// +/// # Note +/// +/// Please note that keys which are residing in the overlay for the child are deleted without +/// counting towards the `limit`. +pub fn clear_storage( + child_info: &ChildInfo, + maybe_limit: Option, + _maybe_cursor: Option<&[u8]>, +) -> MultiRemovalResults { + // TODO: Once the network has upgraded to include the new host functions, this code can be + // enabled. + // sp_io::default_child_storage::storage_kill(prefix, maybe_limit, maybe_cursor) + let r = match child_info.child_type() { + ChildType::ParentKeyId => + sp_io::default_child_storage::storage_kill(child_info.storage_key(), maybe_limit), + }; + use sp_io::KillStorageResult::*; + let (maybe_cursor, backend) = match r { + AllRemoved(db) => (None, db), + SomeRemaining(db) => (Some(child_info.storage_key().to_vec()), db), + }; + MultiRemovalResults { maybe_cursor, backend, unique: backend, loops: backend } +} + /// Ensure `key` has no explicit entry in storage. pub fn kill(child_info: &ChildInfo, key: &[u8]) { match child_info.child_type() { diff --git a/frame/support/src/storage/generator/double_map.rs b/frame/support/src/storage/generator/double_map.rs index 1c308bc351902..90ec92c622b1f 100644 --- a/frame/support/src/storage/generator/double_map.rs +++ b/frame/support/src/storage/generator/double_map.rs @@ -202,11 +202,28 @@ where unhashed::kill(&Self::storage_double_map_final_key(k1, k2)) } - fn remove_prefix(k1: KArg1, limit: Option) -> sp_io::KillStorageResult + fn remove_prefix(k1: KArg1, maybe_limit: Option) -> sp_io::KillStorageResult where KArg1: EncodeLike, { - unhashed::kill_prefix(Self::storage_double_map_final_key1(k1).as_ref(), limit) + unhashed::clear_prefix(Self::storage_double_map_final_key1(k1).as_ref(), maybe_limit, None) + .into() + } + + fn clear_prefix( + k1: KArg1, + limit: u32, + maybe_cursor: Option<&[u8]>, + ) -> sp_io::MultiRemovalResults + where + KArg1: EncodeLike, + { + unhashed::clear_prefix( + Self::storage_double_map_final_key1(k1).as_ref(), + 
Some(limit), + maybe_cursor, + ) + .into() } fn iter_prefix_values(k1: KArg1) -> storage::PrefixIterator diff --git a/frame/support/src/storage/generator/nmap.rs b/frame/support/src/storage/generator/nmap.rs index 18c912cf294ef..0175d681df1d4 100755 --- a/frame/support/src/storage/generator/nmap.rs +++ b/frame/support/src/storage/generator/nmap.rs @@ -183,7 +183,22 @@ where where K: HasKeyPrefix, { - unhashed::kill_prefix(&Self::storage_n_map_partial_key(partial_key), limit) + unhashed::clear_prefix(&Self::storage_n_map_partial_key(partial_key), limit, None).into() + } + + fn clear_prefix( + partial_key: KP, + limit: u32, + maybe_cursor: Option<&[u8]>, + ) -> sp_io::MultiRemovalResults + where + K: HasKeyPrefix, + { + unhashed::clear_prefix( + &Self::storage_n_map_partial_key(partial_key), + Some(limit), + maybe_cursor, + ) } fn iter_prefix_values(partial_key: KP) -> PrefixIterator diff --git a/frame/support/src/storage/migration.rs b/frame/support/src/storage/migration.rs index 713c2b0f3fe01..67001fc4e1f42 100644 --- a/frame/support/src/storage/migration.rs +++ b/frame/support/src/storage/migration.rs @@ -256,12 +256,43 @@ pub fn put_storage_value(module: &[u8], item: &[u8], hash: &[u8], val /// Remove all items under a storage prefix by the `module`, the map's `item` name and the key /// `hash`. +#[deprecated = "Use `clear_storage_prefix` instead"] pub fn remove_storage_prefix(module: &[u8], item: &[u8], hash: &[u8]) { let mut key = vec![0u8; 32 + hash.len()]; let storage_prefix = storage_prefix(module, item); key[0..32].copy_from_slice(&storage_prefix); key[32..].copy_from_slice(hash); - frame_support::storage::unhashed::kill_prefix(&key, None); + let _ = frame_support::storage::unhashed::clear_prefix(&key, None, None); +} + +/// Attempt to remove all values under a storage prefix by the `module`, the map's `item` name and +/// the key `hash`. 
+/// +/// All values in the client overlay will be deleted, if `maybe_limit` is `Some` then up to +/// that number of values are deleted from the client backend by seeking and reading that number of +/// storage values plus one. If `maybe_limit` is `None` then all values in the client backend are +/// deleted. This is potentially unsafe since it's an unbounded operation. +/// +/// ## Cursors +/// +/// The `maybe_cursor` parameter should be `None` for the first call to initial removal. +/// If the resultant `maybe_cursor` is `Some`, then another call is required to complete the +/// removal operation. This value must be passed in as the subsequent call's `maybe_cursor` +/// parameter. If the resultant `maybe_cursor` is `None`, then the operation is complete and no +/// items remain in storage provided that no items were added between the first calls and the +/// final call. +pub fn clear_storage_prefix( + module: &[u8], + item: &[u8], + hash: &[u8], + maybe_limit: Option, + maybe_cursor: Option<&[u8]>, +) -> sp_io::MultiRemovalResults { + let mut key = vec![0u8; 32 + hash.len()]; + let storage_prefix = storage_prefix(module, item); + key[0..32].copy_from_slice(&storage_prefix); + key[32..].copy_from_slice(hash); + frame_support::storage::unhashed::clear_prefix(&key, maybe_limit, maybe_cursor) } /// Take a particular item in storage by the `module`, the map's `item` name and the key `hash`. diff --git a/frame/support/src/storage/mod.rs b/frame/support/src/storage/mod.rs index 7135ead850b6d..c9b4cf75f8e3b 100644 --- a/frame/support/src/storage/mod.rs +++ b/frame/support/src/storage/mod.rs @@ -514,10 +514,34 @@ pub trait StorageDoubleMap { /// Calling this multiple times per block with a `limit` set leads always to the same keys being /// removed and the same result being returned. This happens because the keys to delete in the /// overlay are not taken into account when deleting keys in the backend. 
+ #[deprecated = "Use `clear_prefix` instead"] fn remove_prefix(k1: KArg1, limit: Option) -> sp_io::KillStorageResult where KArg1: ?Sized + EncodeLike; + /// Remove all values under the first key `k1` in the overlay and up to `maybe_limit` in the + /// backend. + /// + /// All values in the client overlay will be deleted, if `maybe_limit` is `Some` then up to + /// that number of values are deleted from the client backend, otherwise all values in the + /// client backend are deleted. + /// + /// ## Cursors + /// + /// The `maybe_cursor` parameter should be `None` for the first call to initial removal. + /// If the resultant `maybe_cursor` is `Some`, then another call is required to complete the + /// removal operation. This value must be passed in as the subsequent call's `maybe_cursor` + /// parameter. If the resultant `maybe_cursor` is `None`, then the operation is complete and no + /// items remain in storage provided that no items were added between the first calls and the + /// final call. + fn clear_prefix( + k1: KArg1, + limit: u32, + maybe_cursor: Option<&[u8]>, + ) -> sp_io::MultiRemovalResults + where + KArg1: ?Sized + EncodeLike; + /// Iterate over values that share the first key. fn iter_prefix_values(k1: KArg1) -> PrefixIterator where @@ -655,10 +679,42 @@ pub trait StorageNMap { /// Calling this multiple times per block with a `limit` set leads always to the same keys being /// removed and the same result being returned. This happens because the keys to delete in the /// overlay are not taken into account when deleting keys in the backend. + #[deprecated = "Use `clear_prefix` instead"] fn remove_prefix(partial_key: KP, limit: Option) -> sp_io::KillStorageResult where K: HasKeyPrefix; + /// Attempt to remove items from the map matching a `partial_key` prefix. + /// + /// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. 
Once + /// the resultant `maybe_cursor` field is `None`, then no further items remain to be deleted. + /// + /// NOTE: After the initial call for any given map, it is important that no further items + /// are inserted into the map which match the `partial key`. If so, then the map may not be + /// empty when the resultant `maybe_cursor` is `None`. + /// + /// # Limit + /// + /// A `limit` must be provided in order to cap the maximum + /// amount of deletions done in a single call. This is one fewer than the + /// maximum number of backend iterations which may be done by this operation and as such + /// represents the maximum number of backend deletions which may happen. A `limit` of zero + /// implies that no keys will be deleted, though there may be a single iteration done. + /// + /// # Cursor + /// + /// A *cursor* may be passed in to this operation with `maybe_cursor`. `None` should only be + /// passed once (in the initial call) for any given storage map and `partial_key`. Subsequent + /// calls operating on the same map/`partial_key` should always pass `Some`, and this should be + /// equal to the previous call result's `maybe_cursor` field. + fn clear_prefix( + partial_key: KP, + limit: u32, + maybe_cursor: Option<&[u8]>, + ) -> sp_io::MultiRemovalResults + where + K: HasKeyPrefix; + /// Iterate over values that share the partial prefix key. fn iter_prefix_values(partial_key: KP) -> PrefixIterator where @@ -1109,8 +1165,36 @@ pub trait StoragePrefixedMap { /// Calling this multiple times per block with a `limit` set leads always to the same keys being /// removed and the same result being returned. This happens because the keys to delete in the /// overlay are not taken into account when deleting keys in the backend. 
+ #[deprecated = "Use `clear` instead"] fn remove_all(limit: Option) -> sp_io::KillStorageResult { - sp_io::storage::clear_prefix(&Self::final_prefix(), limit) + unhashed::clear_prefix(&Self::final_prefix(), limit, None).into() + } + + /// Attempt to remove all items from the map. + /// + /// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once + /// the resultant `maybe_cursor` field is `None`, then no further items remain to be deleted. + /// + /// NOTE: After the initial call for any given map, it is important that no further items + /// are inserted into the map. If so, then the map may not be empty when the resultant + /// `maybe_cursor` is `None`. + /// + /// # Limit + /// + /// A `limit` must always be provided through in order to cap the maximum + /// amount of deletions done in a single call. This is one fewer than the + /// maximum number of backend iterations which may be done by this operation and as such + /// represents the maximum number of backend deletions which may happen. A `limit` of zero + /// implies that no keys will be deleted, though there may be a single iteration done. + /// + /// # Cursor + /// + /// A *cursor* may be passed in to this operation with `maybe_cursor`. `None` should only be + /// passed once (in the initial call) for any given storage map. Subsequent calls + /// operating on the same map should always pass `Some`, and this should be equal to the + /// previous call result's `maybe_cursor` field. + fn clear(limit: u32, maybe_cursor: Option<&[u8]>) -> sp_io::MultiRemovalResults { + unhashed::clear_prefix(&Self::final_prefix(), Some(limit), maybe_cursor) } /// Iter over all value of the storage. 
@@ -1425,7 +1509,7 @@ mod test { assert_eq!(MyStorage::iter_values().collect::>(), vec![1, 2, 3, 4]); // test removal - MyStorage::remove_all(None); + let _ = MyStorage::clear(u32::max_value(), None); assert!(MyStorage::iter_values().collect::>().is_empty()); // test migration @@ -1435,7 +1519,7 @@ mod test { assert!(MyStorage::iter_values().collect::>().is_empty()); MyStorage::translate_values(|v: u32| Some(v as u64)); assert_eq!(MyStorage::iter_values().collect::>(), vec![1, 2]); - MyStorage::remove_all(None); + let _ = MyStorage::clear(u32::max_value(), None); // test migration 2 unhashed::put(&[&k[..], &vec![1][..]].concat(), &1u128); @@ -1447,7 +1531,7 @@ mod test { assert_eq!(MyStorage::iter_values().collect::>(), vec![1, 2, 3]); MyStorage::translate_values(|v: u128| Some(v as u64)); assert_eq!(MyStorage::iter_values().collect::>(), vec![1, 2, 3]); - MyStorage::remove_all(None); + let _ = MyStorage::clear(u32::max_value(), None); // test that other values are not modified. assert_eq!(unhashed::get(&key_before[..]), Some(32u64)); diff --git a/frame/support/src/storage/types/counted_map.rs b/frame/support/src/storage/types/counted_map.rs index a2160ede52fe9..44fcbf607935f 100644 --- a/frame/support/src/storage/types/counted_map.rs +++ b/frame/support/src/storage/types/counted_map.rs @@ -31,6 +31,7 @@ use crate::{ Never, }; use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen, Ref}; +use sp_io::MultiRemovalResults; use sp_runtime::traits::Saturating; use sp_std::prelude::*; @@ -273,13 +274,44 @@ where ::Map::migrate_key::(key) } - /// Remove all value of the storage. + /// Remove all values in the map. + #[deprecated = "Use `clear` instead"] pub fn remove_all() { - // NOTE: it is not possible to remove up to some limit because - // `sp_io::storage::clear_prefix` and `StorageMap::remove_all` don't give the number of - // value removed from the overlay. 
- CounterFor::::set(0u32); + #[allow(deprecated)] ::Map::remove_all(None); + CounterFor::::kill(); + } + + /// Attempt to remove all items from the map. + /// + /// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once + /// the resultant `maybe_cursor` field is `None`, then no further items remain to be deleted. + /// + /// NOTE: After the initial call for any given map, it is important that no further items + /// are inserted into the map. If so, then the map may not be empty when the resultant + /// `maybe_cursor` is `None`. + /// + /// # Limit + /// + /// A `limit` must always be provided through in order to cap the maximum + /// amount of deletions done in a single call. This is one fewer than the + /// maximum number of backend iterations which may be done by this operation and as such + /// represents the maximum number of backend deletions which may happen. A `limit` of zero + /// implies that no keys will be deleted, though there may be a single iteration done. + /// + /// # Cursor + /// + /// A *cursor* may be passed in to this operation with `maybe_cursor`. `None` should only be + /// passed once (in the initial call) for any given storage map. Subsequent calls + /// operating on the same map should always pass `Some`, and this should be equal to the + /// previous call result's `maybe_cursor` field. + pub fn clear(limit: u32, maybe_cursor: Option<&[u8]>) -> MultiRemovalResults { + let result = ::Map::clear(limit, maybe_cursor); + match result.maybe_cursor { + None => CounterFor::::kill(), + Some(_) => CounterFor::::mutate(|x| x.saturating_reduce(result.unique)), + } + result } /// Iter over all value of the storage. @@ -691,7 +723,7 @@ mod test { assert_eq!(A::count(), 2); // Remove all. - A::remove_all(); + let _ = A::clear(u32::max_value(), None); assert_eq!(A::count(), 0); assert_eq!(A::initialize_counter(), 0); @@ -922,7 +954,7 @@ mod test { assert_eq!(B::count(), 2); // Remove all. 
- B::remove_all(); + let _ = B::clear(u32::max_value(), None); assert_eq!(B::count(), 0); assert_eq!(B::initialize_counter(), 0); diff --git a/frame/support/src/storage/types/double_map.rs b/frame/support/src/storage/types/double_map.rs index 42b903128934d..5662087cc896f 100644 --- a/frame/support/src/storage/types/double_map.rs +++ b/frame/support/src/storage/types/double_map.rs @@ -229,13 +229,53 @@ where /// Calling this multiple times per block with a `limit` set leads always to the same keys being /// removed and the same result being returned. This happens because the keys to delete in the /// overlay are not taken into account when deleting keys in the backend. + #[deprecated = "Use `clear_prefix` instead"] pub fn remove_prefix(k1: KArg1, limit: Option) -> sp_io::KillStorageResult where KArg1: ?Sized + EncodeLike, { + #[allow(deprecated)] >::remove_prefix(k1, limit) } + /// Attempt to remove items from the map matching a `first_key` prefix. + /// + /// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once + /// the resultant `maybe_cursor` field is `None`, then no further items remain to be deleted. + /// + /// NOTE: After the initial call for any given map, it is important that no further items + /// are inserted into the map which match the `first_key`. If so, then the map may not be + /// empty when the resultant `maybe_cursor` is `None`. + /// + /// # Limit + /// + /// A `limit` must always be provided through in order to cap the maximum + /// amount of deletions done in a single call. This is one fewer than the + /// maximum number of backend iterations which may be done by this operation and as such + /// represents the maximum number of backend deletions which may happen. A `limit` of zero + /// implies that no keys will be deleted, though there may be a single iteration done. + /// + /// # Cursor + /// + /// A *cursor* may be passed in to this operation with `maybe_cursor`. 
`None` should only be + /// passed once (in the initial call) for any given storage map and `first_key`. Subsequent + /// calls operating on the same map/`first_key` should always pass `Some`, and this should be + /// equal to the previous call result's `maybe_cursor` field. + pub fn clear_prefix( + first_key: KArg1, + limit: u32, + maybe_cursor: Option<&[u8]>, + ) -> sp_io::MultiRemovalResults + where + KArg1: ?Sized + EncodeLike, + { + >::clear_prefix( + first_key, + limit, + maybe_cursor, + ) + } + /// Iterate over values that share the first key. pub fn iter_prefix_values(k1: KArg1) -> crate::storage::PrefixIterator where @@ -359,10 +399,39 @@ where /// Calling this multiple times per block with a `limit` set leads always to the same keys being /// removed and the same result being returned. This happens because the keys to delete in the /// overlay are not taken into account when deleting keys in the backend. + #[deprecated = "Use `clear` instead"] pub fn remove_all(limit: Option) -> sp_io::KillStorageResult { + #[allow(deprecated)] >::remove_all(limit) + } + + /// Attempt to remove all items from the map. + /// + /// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once + /// the resultant `maybe_cursor` field is `None`, then no further items remain to be deleted. + /// + /// NOTE: After the initial call for any given map, it is important that no further items + /// are inserted into the map. If so, then the map may not be empty when the resultant + /// `maybe_cursor` is `None`. + /// + /// # Limit + /// + /// A `limit` must always be provided through in order to cap the maximum + /// amount of deletions done in a single call. This is one fewer than the + /// maximum number of backend iterations which may be done by this operation and as such + /// represents the maximum number of backend deletions which may happen. A `limit` of zero + /// implies that no keys will be deleted, though there may be a single iteration done. 
+ /// + /// # Cursor + /// + /// A *cursor* may be passed in to this operation with `maybe_cursor`. `None` should only be + /// passed once (in the initial call) for any given storage map. Subsequent calls + /// operating on the same map should always pass `Some`, and this should be equal to the + /// previous call result's `maybe_cursor` field. + pub fn clear(limit: u32, maybe_cursor: Option<&[u8]>) -> sp_io::MultiRemovalResults { + >::clear(limit, maybe_cursor) + } + /// Iter over all value of the storage. /// /// NOTE: If a value failed to decode because storage is corrupted then it is skipped. @@ -768,7 +837,7 @@ mod test { A::insert(3, 30, 10); A::insert(4, 40, 10); - A::remove_all(None); + let _ = A::clear(u32::max_value(), None); assert_eq!(A::contains_key(3, 30), false); assert_eq!(A::contains_key(4, 40), false); @@ -829,7 +898,7 @@ mod test { ] ); - WithLen::remove_all(None); + let _ = WithLen::clear(u32::max_value(), None); assert_eq!(WithLen::decode_len(3, 30), None); WithLen::append(0, 100, 10); assert_eq!(WithLen::decode_len(0, 100), Some(1)); @@ -843,7 +912,7 @@ mod test { assert_eq!(A::iter_prefix_values(4).collect::>(), vec![13, 14]); assert_eq!(A::iter_prefix(4).collect::>(), vec![(40, 13), (41, 14)]); - A::remove_prefix(3, None); + let _ = A::clear_prefix(3, u32::max_value(), None); assert_eq!(A::iter_prefix(3).collect::>(), vec![]); assert_eq!(A::iter_prefix(4).collect::>(), vec![(40, 13), (41, 14)]); diff --git a/frame/support/src/storage/types/map.rs b/frame/support/src/storage/types/map.rs index bc32c58da8db6..6dac5b3756598 100644 --- a/frame/support/src/storage/types/map.rs +++ b/frame/support/src/storage/types/map.rs @@ -247,10 +247,39 @@ where /// Calling this multiple times per block with a `limit` set leads always to the same keys being /// removed and the same result being returned. This happens because the keys to delete in the /// overlay are not taken into account when deleting keys in the backend. 
+ #[deprecated = "Use `clear` instead"] pub fn remove_all(limit: Option) -> sp_io::KillStorageResult { + #[allow(deprecated)] >::remove_all(limit) } + /// Attempt to remove all items from the map. + /// + /// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once + /// the resultant `maybe_cursor` field is `None`, then no further items remain to be deleted. + /// + /// NOTE: After the initial call for any given map, it is important that no further items + /// are inserted into the map. If so, then the map may not be empty when the resultant + /// `maybe_cursor` is `None`. + /// + /// # Limit + /// + /// A `limit` must always be provided through in order to cap the maximum + /// amount of deletions done in a single call. This is one fewer than the + /// maximum number of backend iterations which may be done by this operation and as such + /// represents the maximum number of backend deletions which may happen. A `limit` of zero + /// implies that no keys will be deleted, though there may be a single iteration done. + /// + /// # Cursor + /// + /// A *cursor* may be passed in to this operation with `maybe_cursor`. `None` should only be + /// passed once (in the initial call) for any given storage map. Subsequent calls + /// operating on the same map should always pass `Some`, and this should be equal to the + /// previous call result's `maybe_cursor` field. + pub fn clear(limit: u32, maybe_cursor: Option<&[u8]>) -> sp_io::MultiRemovalResults { + >::clear(limit, maybe_cursor) + } + /// Iter over all value of the storage. /// /// NOTE: If a value failed to decode because storage is corrupted then it is skipped. 
@@ -563,7 +592,7 @@ mod test { A::insert(3, 10); A::insert(4, 10); - A::remove_all(None); + let _ = A::clear(u32::max_value(), None); assert_eq!(A::contains_key(3), false); assert_eq!(A::contains_key(4), false); @@ -618,7 +647,7 @@ mod test { ] ); - WithLen::remove_all(None); + let _ = WithLen::clear(u32::max_value(), None); assert_eq!(WithLen::decode_len(3), None); WithLen::append(0, 10); assert_eq!(WithLen::decode_len(0), Some(1)); diff --git a/frame/support/src/storage/types/nmap.rs b/frame/support/src/storage/types/nmap.rs index a15c7482105e9..1f303525b7476 100755 --- a/frame/support/src/storage/types/nmap.rs +++ b/frame/support/src/storage/types/nmap.rs @@ -185,13 +185,53 @@ where /// Calling this multiple times per block with a `limit` set leads always to the same keys being /// removed and the same result being returned. This happens because the keys to delete in the /// overlay are not taken into account when deleting keys in the backend. + #[deprecated = "Use `clear_prefix` instead"] pub fn remove_prefix(partial_key: KP, limit: Option) -> sp_io::KillStorageResult where Key: HasKeyPrefix, { + #[allow(deprecated)] >::remove_prefix(partial_key, limit) } + /// Attempt to remove items from the map matching a `partial_key` prefix. + /// + /// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once + /// the resultant `maybe_cursor` field is `None`, then no further items remain to be deleted. + /// + /// NOTE: After the initial call for any given map, it is important that no further items + /// are inserted into the map which match the `partial key`. If so, then the map may not be + /// empty when the resultant `maybe_cursor` is `None`. + /// + /// # Limit + /// + /// A `limit` must be provided in order to cap the maximum + /// amount of deletions done in a single call. 
This is one fewer than the + /// maximum number of backend iterations which may be done by this operation and as such + /// represents the maximum number of backend deletions which may happen. A `limit` of zero + /// implies that no keys will be deleted, though there may be a single iteration done. + /// + /// # Cursor + /// + /// A *cursor* may be passed in to this operation with `maybe_cursor`. `None` should only be + /// passed once (in the initial call) for any given storage map and `partial_key`. Subsequent + /// calls operating on the same map/`partial_key` should always pass `Some`, and this should be + /// equal to the previous call result's `maybe_cursor` field. + pub fn clear_prefix( + partial_key: KP, + limit: u32, + maybe_cursor: Option<&[u8]>, + ) -> sp_io::MultiRemovalResults + where + Key: HasKeyPrefix, + { + >::clear_prefix( + partial_key, + limit, + maybe_cursor, + ) + } + /// Iterate over values that share the first key. pub fn iter_prefix_values(partial_key: KP) -> PrefixIterator where @@ -299,8 +339,37 @@ where /// Calling this multiple times per block with a `limit` set leads always to the same keys being /// removed and the same result being returned. This happens because the keys to delete in the /// overlay are not taken into account when deleting keys in the backend. + #[deprecated = "Use `clear` instead"] pub fn remove_all(limit: Option) -> sp_io::KillStorageResult { - >::remove_all(limit) + #[allow(deprecated)] + >::remove_all(limit).into() + } + + /// Attempt to remove all items from the map. + /// + /// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once + /// the resultant `maybe_cursor` field is `None`, then no further items remain to be deleted. + /// + /// NOTE: After the initial call for any given map, it is important that no further items + /// are inserted into the map. If so, then the map may not be empty when the resultant + /// `maybe_cursor` is `None`. 
+ /// + /// # Limit + /// + /// A `limit` must always be provided through in order to cap the maximum + /// amount of deletions done in a single call. This is one fewer than the + /// maximum number of backend iterations which may be done by this operation and as such + /// represents the maximum number of backend deletions which may happen. A `limit` of zero + /// implies that no keys will be deleted, though there may be a single iteration done. + /// + /// # Cursor + /// + /// A *cursor* may be passed in to this operation with `maybe_cursor`. `None` should only be + /// passed once (in the initial call) for any given storage map. Subsequent calls + /// operating on the same map should always pass `Some`, and this should be equal to the + /// previous call result's `maybe_cursor` field. + pub fn clear(limit: u32, maybe_cursor: Option<&[u8]>) -> sp_io::MultiRemovalResults { + >::clear(limit, maybe_cursor) } /// Iter over all value of the storage. @@ -544,7 +613,7 @@ mod test { use crate::{ hash::{StorageHasher as _, *}, metadata::{StorageEntryModifier, StorageHasher}, - storage::types::{Key, ValueQuery}, + storage::types::{Key as NMapKey, ValueQuery}, }; use sp_io::{hashing::twox_128, TestExternalities}; @@ -565,12 +634,12 @@ mod test { #[test] fn test_1_key() { - type A = StorageNMap, u32, OptionQuery>; + type A = StorageNMap, u32, OptionQuery>; type AValueQueryWithAnOnEmpty = - StorageNMap, u32, ValueQuery, ADefault>; - type B = StorageNMap, u32, ValueQuery>; - type C = StorageNMap, u8, ValueQuery>; - type WithLen = StorageNMap, Vec>; + StorageNMap, u32, ValueQuery, ADefault>; + type B = StorageNMap, u32, ValueQuery>; + type C = StorageNMap, u8, ValueQuery>; + type WithLen = StorageNMap, Vec>; TestExternalities::default().execute_with(|| { let mut k: Vec = vec![]; @@ -590,13 +659,13 @@ mod test { { #[crate::storage_alias] - type Foo = StorageNMap), u32>; + type Foo = StorageNMap), u32>; assert_eq!(Foo::contains_key((3,)), true); assert_eq!(Foo::get((3,)), 
Some(10)); } - A::swap::, _, _>((3,), (2,)); + A::swap::, _, _>((3,), (2,)); assert_eq!(A::contains_key((3,)), false); assert_eq!(A::contains_key((2,)), true); assert_eq!(A::get((3,)), None); @@ -684,7 +753,7 @@ mod test { A::insert((3,), 10); A::insert((4,), 10); - A::remove_all(None); + let _ = A::clear(u32::max_value(), None); assert_eq!(A::contains_key((3,)), false); assert_eq!(A::contains_key((4,)), false); @@ -739,7 +808,7 @@ mod test { ] ); - WithLen::remove_all(None); + let _ = WithLen::clear(u32::max_value(), None); assert_eq!(WithLen::decode_len((3,)), None); WithLen::append((0,), 10); assert_eq!(WithLen::decode_len((0,)), Some(1)); @@ -750,26 +819,30 @@ mod test { fn test_2_keys() { type A = StorageNMap< Prefix, - (Key, Key), + (NMapKey, NMapKey), u32, OptionQuery, >; type AValueQueryWithAnOnEmpty = StorageNMap< Prefix, - (Key, Key), + (NMapKey, NMapKey), u32, ValueQuery, ADefault, >; - type B = StorageNMap, Key), u32, ValueQuery>; + type B = + StorageNMap, NMapKey), u32, ValueQuery>; type C = StorageNMap< Prefix, - (Key, Key), + (NMapKey, NMapKey), u8, ValueQuery, >; - type WithLen = - StorageNMap, Key), Vec>; + type WithLen = StorageNMap< + Prefix, + (NMapKey, NMapKey), + Vec, + >; TestExternalities::default().execute_with(|| { let mut k: Vec = vec![]; @@ -788,7 +861,10 @@ mod test { assert_eq!(A::get((3, 30)), Some(10)); assert_eq!(AValueQueryWithAnOnEmpty::get((3, 30)), 10); - A::swap::<(Key, Key), _, _>((3, 30), (2, 20)); + A::swap::<(NMapKey, NMapKey), _, _>( + (3, 30), + (2, 20), + ); assert_eq!(A::contains_key((3, 30)), false); assert_eq!(A::contains_key((2, 20)), true); assert_eq!(A::get((3, 30)), None); @@ -877,7 +953,7 @@ mod test { A::insert((3, 30), 10); A::insert((4, 40), 10); - A::remove_all(None); + let _ = A::clear(u32::max_value(), None); assert_eq!(A::contains_key((3, 30)), false); assert_eq!(A::contains_key((4, 40)), false); @@ -938,7 +1014,7 @@ mod test { ] ); - WithLen::remove_all(None); + let _ = WithLen::clear(u32::max_value(), 
None); assert_eq!(WithLen::decode_len((3, 30)), None); WithLen::append((0, 100), 10); assert_eq!(WithLen::decode_len((0, 100)), Some(1)); @@ -956,32 +1032,48 @@ mod test { fn test_3_keys() { type A = StorageNMap< Prefix, - (Key, Key, Key), + ( + NMapKey, + NMapKey, + NMapKey, + ), u32, OptionQuery, >; type AValueQueryWithAnOnEmpty = StorageNMap< Prefix, - (Key, Key, Key), + ( + NMapKey, + NMapKey, + NMapKey, + ), u32, ValueQuery, ADefault, >; type B = StorageNMap< Prefix, - (Key, Key, Key), + (NMapKey, NMapKey, NMapKey), u32, ValueQuery, >; type C = StorageNMap< Prefix, - (Key, Key, Key), + ( + NMapKey, + NMapKey, + NMapKey, + ), u8, ValueQuery, >; type WithLen = StorageNMap< Prefix, - (Key, Key, Key), + ( + NMapKey, + NMapKey, + NMapKey, + ), Vec, >; @@ -1004,7 +1096,11 @@ mod test { assert_eq!(AValueQueryWithAnOnEmpty::get((1, 10, 100)), 30); A::swap::< - (Key, Key, Key), + ( + NMapKey, + NMapKey, + NMapKey, + ), _, _, >((1, 10, 100), (2, 20, 200)); @@ -1093,7 +1189,7 @@ mod test { A::insert((3, 30, 300), 10); A::insert((4, 40, 400), 10); - A::remove_all(None); + let _ = A::clear(u32::max_value(), None); assert_eq!(A::contains_key((3, 30, 300)), false); assert_eq!(A::contains_key((4, 40, 400)), false); @@ -1161,7 +1257,7 @@ mod test { ] ); - WithLen::remove_all(None); + let _ = WithLen::clear(u32::max_value(), None); assert_eq!(WithLen::decode_len((3, 30, 300)), None); WithLen::append((0, 100, 1000), 10); assert_eq!(WithLen::decode_len((0, 100, 1000)), Some(1)); diff --git a/frame/support/src/storage/unhashed.rs b/frame/support/src/storage/unhashed.rs index adabc9c14a3d5..fc6d8ae79c57d 100644 --- a/frame/support/src/storage/unhashed.rs +++ b/frame/support/src/storage/unhashed.rs @@ -96,10 +96,63 @@ pub fn kill(key: &[u8]) { } /// Ensure keys with the given `prefix` have no entries in storage. 
+#[deprecated = "Use `clear_prefix` instead"] pub fn kill_prefix(prefix: &[u8], limit: Option) -> sp_io::KillStorageResult { + // TODO: Once the network has upgraded to include the new host functions, this code can be + // enabled. + // clear_prefix(prefix, limit).into() sp_io::storage::clear_prefix(prefix, limit) } +/// Partially clear the storage of all keys under a common `prefix`. +/// +/// # Limit +/// +/// A *limit* should always be provided through `maybe_limit`. This is one fewer than the +/// maximum number of backend iterations which may be done by this operation and as such +/// represents the maximum number of backend deletions which may happen. A *limit* of zero +/// implies that no keys will be deleted, though there may be a single iteration done. +/// +/// The limit can be used to partially delete storage items in case it is too large or costly +/// to delete all in a single operation. +/// +/// # Cursor +/// +/// A *cursor* may be passed in to this operation with `maybe_cursor`. `None` should only be +/// passed once (in the initial call) for any attempt to clear storage. In general, subsequent calls +/// operating on the same prefix should pass `Some` and this value should be equal to the +/// previous call result's `maybe_cursor` field. The only exception to this is when you can +/// guarantee that the subsequent call is in a new block; in this case the previous call's result +/// cursor need not be passed in and a `None` may be passed instead. This exception may be useful +/// when making this call solely from a block-hook such as `on_initialize`. +/// +/// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once the +/// resultant `maybe_cursor` field is `None`, then no further items remain to be deleted. +/// +/// NOTE: After the initial call for any given prefix, it is important that no further +/// keys are inserted. If so, then they may or may not be deleted by subsequent calls.
+ /// + /// # Note + /// + /// Please note that keys which are residing in the overlay for this prefix are deleted without + counting towards the `limit`. +pub fn clear_prefix( + prefix: &[u8], + maybe_limit: Option, + _maybe_cursor: Option<&[u8]>, +) -> sp_io::MultiRemovalResults { + // TODO: Once the network has upgraded to include the new host functions, this code can be + // enabled. + // sp_io::storage::clear_prefix(prefix, maybe_limit, maybe_cursor) + use sp_io::{KillStorageResult::*, MultiRemovalResults}; + #[allow(deprecated)] + let (maybe_cursor, i) = match kill_prefix(prefix, maybe_limit) { + AllRemoved(i) => (None, i), + SomeRemaining(i) => (Some(prefix.to_vec()), i), + }; + MultiRemovalResults { maybe_cursor, backend: i, unique: i, loops: i } +} + /// Get a Vec of bytes from storage. pub fn get_raw(key: &[u8]) -> Option> { sp_io::storage::get(key) diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index ba494dfbd9f8c..f2999f945c5c2 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -475,7 +475,7 @@ pub mod pallet { _subkeys: u32, ) -> DispatchResultWithPostInfo { ensure_root(origin)?; - storage::unhashed::kill_prefix(&prefix, None); + let _ = storage::unhashed::clear_prefix(&prefix, None, None); Ok(().into()) } @@ -1427,7 +1427,7 @@ impl Pallet { pub fn reset_events() { >::kill(); EventCount::::kill(); - >::remove_all(None); + let _ = >::clear(u32::max_value(), None); } /// Assert the given `event` exists. 
diff --git a/frame/uniques/src/functions.rs b/frame/uniques/src/functions.rs index cdaa31d4e1c8a..c5220b562b172 100644 --- a/frame/uniques/src/functions.rs +++ b/frame/uniques/src/functions.rs @@ -110,8 +110,10 @@ impl, I: 'static> Pallet { for (item, details) in Item::::drain_prefix(&collection) { Account::::remove((&details.owner, &collection, &item)); } + #[allow(deprecated)] ItemMetadataOf::::remove_prefix(&collection, None); CollectionMetadataOf::::remove(&collection); + #[allow(deprecated)] Attribute::::remove_prefix((&collection,), None); CollectionAccount::::remove(&collection_details.owner, &collection); T::Currency::unreserve(&collection_details.owner, collection_details.total_deposit); diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index 49b190b4ae260..b343b8c393e00 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -52,6 +52,30 @@ pub enum Error { StorageUpdateFailed(&'static str), } +/// Results concerning an operation to remove many keys. +#[derive(codec::Encode, codec::Decode)] +#[must_use] +pub struct MultiRemovalResults { + /// A continuation cursor which, if `Some` must be provided to the subsequent removal call. + /// If `None` then all removals are complete and no further calls are needed. + pub maybe_cursor: Option>, + /// The number of items removed from the backend database. + pub backend: u32, + /// The number of unique keys removed, taking into account both the backend and the overlay. + pub unique: u32, + /// The number of iterations (each requiring a storage seek/read) which were done. + pub loops: u32, +} + +impl MultiRemovalResults { + /// Deconstruct into the internal components. + /// + /// Returns `(maybe_cursor, backend, unique, loops)`. + pub fn deconstruct(self) -> (Option>, u32, u32, u32) { + (self.maybe_cursor, self.backend, self.unique, self.loops) + } +} + /// The Substrate externalities. 
/// /// Provides access to the storage and to other registered extensions. @@ -118,32 +142,47 @@ pub trait Externalities: ExtensionStore { /// Clear an entire child storage. /// - /// Deletes all keys from the overlay and up to `limit` keys from the backend. No - /// limit is applied if `limit` is `None`. Returned boolean is `true` if the child trie was - /// removed completely and `false` if there are remaining keys after the function - /// returns. Returned `u32` is the number of keys that was removed at the end of the - /// operation. + /// Deletes all keys from the overlay and up to `maybe_limit` keys from the backend. No + /// limit is applied if `maybe_limit` is `None`. Returns the cursor for the next call as `Some` + /// if the child trie deletion operation is incomplete. In this case, it should be passed into + /// the next call to avoid unaccounted iterations on the backend. Returns also the the number + /// of keys that were removed from the backend, the number of unique keys removed in total + /// (including from the overlay) and the number of backend iterations done. + /// + /// As long as `maybe_cursor` is passed from the result of the previous call, then the number of + /// iterations done will only ever be one more than the number of keys removed. /// /// # Note /// /// An implementation is free to delete more keys than the specified limit as long as /// it is able to do that in constant time. - fn kill_child_storage(&mut self, child_info: &ChildInfo, limit: Option) -> (bool, u32); + fn kill_child_storage( + &mut self, + child_info: &ChildInfo, + maybe_limit: Option, + maybe_cursor: Option<&[u8]>, + ) -> MultiRemovalResults; /// Clear storage entries which keys are start with the given prefix. /// - /// `limit` and result works as for `kill_child_storage`. - fn clear_prefix(&mut self, prefix: &[u8], limit: Option) -> (bool, u32); + /// `maybe_limit`, `maybe_cursor` and result works as for `kill_child_storage`. 
+ fn clear_prefix( + &mut self, + prefix: &[u8], + maybe_limit: Option, + maybe_cursor: Option<&[u8]>, + ) -> MultiRemovalResults; /// Clear child storage entries which keys are start with the given prefix. /// - /// `limit` and result works as for `kill_child_storage`. + /// `maybe_limit`, `maybe_cursor` and result works as for `kill_child_storage`. fn clear_child_prefix( &mut self, child_info: &ChildInfo, prefix: &[u8], - limit: Option, - ) -> (bool, u32); + maybe_limit: Option, + maybe_cursor: Option<&[u8]>, + ) -> MultiRemovalResults; /// Set or clear a storage entry (`key`) of current contract being called (effective /// immediately). diff --git a/primitives/io/Cargo.toml b/primitives/io/Cargo.toml index bd288dd896d1b..09a087d509416 100644 --- a/primitives/io/Cargo.toml +++ b/primitives/io/Cargo.toml @@ -25,7 +25,7 @@ sp-state-machine = { version = "0.12.0", optional = true, path = "../state-machi sp-wasm-interface = { version = "6.0.0", path = "../wasm-interface", default-features = false } sp-runtime-interface = { version = "6.0.0", default-features = false, path = "../runtime-interface" } sp-trie = { version = "6.0.0", optional = true, path = "../trie" } -sp-externalities = { version = "0.12.0", optional = true, path = "../externalities" } +sp-externalities = { version = "0.12.0", default-features = false, path = "../externalities" } sp-tracing = { version = "5.0.0", default-features = false, path = "../tracing" } log = { version = "0.4.17", optional = true } futures = { version = "0.3.21", features = ["thread-pool"], optional = true } @@ -37,6 +37,7 @@ tracing-core = { version = "0.1.26", default-features = false} [features] default = ["std"] std = [ + "sp-externalities/std", "sp-core/std", "sp-keystore", "codec/std", @@ -47,7 +48,6 @@ std = [ "libsecp256k1", "secp256k1", "sp-runtime-interface/std", - "sp-externalities", "sp-wasm-interface/std", "sp-tracing/std", "tracing/std", diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 
8f62b03b62ea9..0a931919437af 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -81,6 +81,8 @@ mod batch_verifier; #[cfg(feature = "std")] use batch_verifier::BatchVerifier; +pub use sp_externalities::MultiRemovalResults; + #[cfg(feature = "std")] const LOG_TARGET: &str = "runtime::io"; @@ -99,12 +101,24 @@ pub enum EcdsaVerifyError { /// removed from the backend from making the `storage_kill` call. #[derive(PassByCodec, Encode, Decode)] pub enum KillStorageResult { - /// All key to remove were removed, return number of key removed from backend. + /// All keys to remove were removed, return number of iterations performed during the + /// operation. AllRemoved(u32), - /// Not all key to remove were removed, return number of key removed from backend. + /// Not all key to remove were removed, return number of iterations performed during the + /// operation. SomeRemaining(u32), } +impl From for KillStorageResult { + fn from(r: MultiRemovalResults) -> Self { + match r { + MultiRemovalResults { maybe_cursor: None, backend, .. } => Self::AllRemoved(backend), + MultiRemovalResults { maybe_cursor: Some(..), backend, .. } => + Self::SomeRemaining(backend), + } + } +} + /// Interface for accessing the storage from within the runtime. #[runtime_interface] pub trait Storage { @@ -145,7 +159,7 @@ pub trait Storage { /// Clear the storage of each key-value pair where the key starts with the given `prefix`. fn clear_prefix(&mut self, prefix: &[u8]) { - let _ = Externalities::clear_prefix(*self, prefix, None); + let _ = Externalities::clear_prefix(*self, prefix, None, None); } /// Clear the storage of each key-value pair where the key starts with the given `prefix`. @@ -175,13 +189,60 @@ pub trait Storage { /// backend. 
#[version(2)] fn clear_prefix(&mut self, prefix: &[u8], limit: Option) -> KillStorageResult { - let (all_removed, num_removed) = Externalities::clear_prefix(*self, prefix, limit); - match all_removed { - true => KillStorageResult::AllRemoved(num_removed), - false => KillStorageResult::SomeRemaining(num_removed), + let r = Externalities::clear_prefix(*self, prefix, limit, None); + match r.maybe_cursor { + None => KillStorageResult::AllRemoved(r.loops), + Some(_) => KillStorageResult::SomeRemaining(r.loops), } } + /// Partially clear the storage of each key-value pair where the key starts with the given + /// prefix. + /// + /// # Limit + /// + /// A *limit* should always be provided through `maybe_limit`. This is one fewer than the + /// maximum number of backend iterations which may be done by this operation and as such + /// represents the maximum number of backend deletions which may happen. A *limit* of zero + /// implies that no keys will be deleted, though there may be a single iteration done. + /// + /// The limit can be used to partially delete a prefix storage in case it is too large or costly + /// to delete in a single operation. + /// + /// # Cursor + /// + /// A *cursor* may be passed in to this operation with `maybe_cursor`. `None` should only be + /// passed once (in the initial call) for any given `maybe_prefix` value. Subsequent calls + /// operating on the same prefix should always pass `Some`, and this should be equal to the + /// previous call result's `maybe_cursor` field. + /// + /// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once + /// the resultant `maybe_cursor` field is `None`, then no further items remain to be deleted. + /// + /// NOTE: After the initial call for any given prefix, it is important that no keys further + /// keys under the same prefix are inserted. If so, then they may or may not be deleted by + /// subsequent calls. 
+ /// + /// # Note + /// + /// Please note that keys which are residing in the overlay for that prefix when + /// issuing this call are deleted without counting towards the `limit`. + #[version(3, register_only)] + fn clear_prefix( + &mut self, + maybe_prefix: &[u8], + maybe_limit: Option, + maybe_cursor: Option>, //< TODO Make work or just Option>? + ) -> MultiRemovalResults { + Externalities::clear_prefix( + *self, + maybe_prefix, + maybe_limit, + maybe_cursor.as_ref().map(|x| &x[..]), + ) + .into() + } + /// Append the encoded `value` to the storage item at `key`. /// /// The storage item needs to implement [`EncodeAppend`](codec::EncodeAppend). @@ -323,7 +384,7 @@ pub trait DefaultChildStorage { /// is removed. fn storage_kill(&mut self, storage_key: &[u8]) { let child_info = ChildInfo::new_default(storage_key); - self.kill_child_storage(&child_info, None); + let _ = self.kill_child_storage(&child_info, None, None); } /// Clear a child storage key. @@ -332,8 +393,8 @@ pub trait DefaultChildStorage { #[version(2)] fn storage_kill(&mut self, storage_key: &[u8], limit: Option) -> bool { let child_info = ChildInfo::new_default(storage_key); - let (all_removed, _num_removed) = self.kill_child_storage(&child_info, limit); - all_removed + let r = self.kill_child_storage(&child_info, limit, None); + r.maybe_cursor.is_none() } /// Clear a child storage key. @@ -342,13 +403,28 @@ pub trait DefaultChildStorage { #[version(3)] fn storage_kill(&mut self, storage_key: &[u8], limit: Option) -> KillStorageResult { let child_info = ChildInfo::new_default(storage_key); - let (all_removed, num_removed) = self.kill_child_storage(&child_info, limit); - match all_removed { - true => KillStorageResult::AllRemoved(num_removed), - false => KillStorageResult::SomeRemaining(num_removed), + let r = self.kill_child_storage(&child_info, limit, None); + match r.maybe_cursor { + None => KillStorageResult::AllRemoved(r.loops), + Some(..) 
=> KillStorageResult::SomeRemaining(r.loops), } } + /// Clear a child storage key. + /// + /// See `Storage` module `clear_prefix` documentation for `limit` usage. + #[version(4, register_only)] + fn storage_kill( + &mut self, + storage_key: &[u8], + maybe_limit: Option, + maybe_cursor: Option>, + ) -> MultiRemovalResults { + let child_info = ChildInfo::new_default(storage_key); + self.kill_child_storage(&child_info, maybe_limit, maybe_cursor.as_ref().map(|x| &x[..])) + .into() + } + /// Check a child storage key. /// /// Check whether the given `key` exists in default child defined at `storage_key`. @@ -362,7 +438,7 @@ pub trait DefaultChildStorage { /// Clear the child storage of each key-value pair where the key starts with the given `prefix`. fn clear_prefix(&mut self, storage_key: &[u8], prefix: &[u8]) { let child_info = ChildInfo::new_default(storage_key); - let _ = self.clear_child_prefix(&child_info, prefix, None); + let _ = self.clear_child_prefix(&child_info, prefix, None, None); } /// Clear the child storage of each key-value pair where the key starts with the given `prefix`. @@ -376,13 +452,34 @@ pub trait DefaultChildStorage { limit: Option, ) -> KillStorageResult { let child_info = ChildInfo::new_default(storage_key); - let (all_removed, num_removed) = self.clear_child_prefix(&child_info, prefix, limit); - match all_removed { - true => KillStorageResult::AllRemoved(num_removed), - false => KillStorageResult::SomeRemaining(num_removed), + let r = self.clear_child_prefix(&child_info, prefix, limit, None); + match r.maybe_cursor { + None => KillStorageResult::AllRemoved(r.loops), + Some(..) => KillStorageResult::SomeRemaining(r.loops), } } + /// Clear the child storage of each key-value pair where the key starts with the given `prefix`. + /// + /// See `Storage` module `clear_prefix` documentation for `limit` usage. 
+ #[version(3, register_only)] + fn clear_prefix( + &mut self, + storage_key: &[u8], + prefix: &[u8], + maybe_limit: Option, + maybe_cursor: Option>, + ) -> MultiRemovalResults { + let child_info = ChildInfo::new_default(storage_key); + self.clear_child_prefix( + &child_info, + prefix, + maybe_limit, + maybe_cursor.as_ref().map(|x| &x[..]), + ) + .into() + } + /// Default child root calculation. /// /// "Commit" all existing operations and compute the resulting child storage root. @@ -1757,15 +1854,30 @@ mod tests { }); t.execute_with(|| { + // We can switch to this once we enable v3 of the `clear_prefix`. + //assert!(matches!( + // storage::clear_prefix(b":abc", None), + // MultiRemovalResults::NoneLeft { db: 2, total: 2 } + //)); assert!(matches!( storage::clear_prefix(b":abc", None), - KillStorageResult::AllRemoved(2) + KillStorageResult::AllRemoved(2), )); assert!(storage::get(b":a").is_some()); assert!(storage::get(b":abdd").is_some()); assert!(storage::get(b":abcd").is_none()); assert!(storage::get(b":abc").is_none()); + + // We can switch to this once we enable v3 of the `clear_prefix`. 
+ //assert!(matches!( + // storage::clear_prefix(b":abc", None), + // MultiRemovalResults::NoneLeft { db: 0, total: 0 } + //)); + assert!(matches!( + storage::clear_prefix(b":abc", None), + KillStorageResult::AllRemoved(0), + )); }); } diff --git a/primitives/runtime-interface/Cargo.toml b/primitives/runtime-interface/Cargo.toml index c9419f73722c6..8cd31bab559ea 100644 --- a/primitives/runtime-interface/Cargo.toml +++ b/primitives/runtime-interface/Cargo.toml @@ -18,7 +18,7 @@ sp-wasm-interface = { version = "6.0.0", path = "../wasm-interface", default-fea sp-std = { version = "4.0.0", default-features = false, path = "../std" } sp-tracing = { version = "5.0.0", default-features = false, path = "../tracing" } sp-runtime-interface-proc-macro = { version = "5.0.0", path = "proc-macro" } -sp-externalities = { version = "0.12.0", optional = true, path = "../externalities" } +sp-externalities = { version = "0.12.0", default-features = false, path = "../externalities" } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } static_assertions = "1.0.0" primitive-types = { version = "0.11.1", default-features = false } @@ -40,7 +40,7 @@ std = [ "sp-std/std", "sp-tracing/std", "codec/std", - "sp-externalities", + "sp-externalities/std", "primitive-types/std", ] diff --git a/primitives/runtime-interface/src/impls.rs b/primitives/runtime-interface/src/impls.rs index 3c1927d6b361a..e801931c306cf 100644 --- a/primitives/runtime-interface/src/impls.rs +++ b/primitives/runtime-interface/src/impls.rs @@ -548,3 +548,7 @@ impl PassBy for sp_storage::TrackedStorageKey { impl PassBy for sp_storage::StateVersion { type PassBy = Enum; } + +impl PassBy for sp_externalities::MultiRemovalResults { + type PassBy = Codec; +} diff --git a/primitives/state-machine/Cargo.toml b/primitives/state-machine/Cargo.toml index e472b66c5d9ee..1c652966180a7 100644 --- a/primitives/state-machine/Cargo.toml +++ b/primitives/state-machine/Cargo.toml @@ -35,6 +35,7 @@ 
hex-literal = "0.3.4" pretty_assertions = "1.2.1" rand = "0.7.2" sp-runtime = { version = "6.0.0", path = "../runtime" } +assert_matches = "1.5" [features] default = ["std"] diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index e9d4ac0fb8224..505b53c800f9e 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -112,6 +112,7 @@ pub trait Backend: sp_std::fmt::Debug { &self, child_info: Option<&ChildInfo>, prefix: Option<&[u8]>, + start_at: Option<&[u8]>, f: F, ); diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index bb387e6406243..6efc847bfbdb7 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -29,7 +29,7 @@ use sp_core::{ traits::Externalities, Blake2Hasher, }; -use sp_externalities::{Extension, Extensions}; +use sp_externalities::{Extension, Extensions, MultiRemovalResults}; use sp_trie::{empty_child_trie_root, HashKey, LayoutV0, LayoutV1, TrieConfiguration}; use std::{ any::{Any, TypeId}, @@ -209,23 +209,34 @@ impl Externalities for BasicExternalities { } } - fn kill_child_storage(&mut self, child_info: &ChildInfo, _limit: Option) -> (bool, u32) { - let num_removed = self + fn kill_child_storage( + &mut self, + child_info: &ChildInfo, + _maybe_limit: Option, + _maybe_cursor: Option<&[u8]>, + ) -> MultiRemovalResults { + let count = self .inner .children_default .remove(child_info.storage_key()) .map(|c| c.data.len()) - .unwrap_or(0); - (true, num_removed as u32) + .unwrap_or(0) as u32; + MultiRemovalResults { maybe_cursor: None, backend: count, unique: count, loops: count } } - fn clear_prefix(&mut self, prefix: &[u8], _limit: Option) -> (bool, u32) { + fn clear_prefix( + &mut self, + prefix: &[u8], + _maybe_limit: Option, + _maybe_cursor: Option<&[u8]>, + ) -> MultiRemovalResults { if is_child_storage_key(prefix) { warn!( target: "trie", "Refuse to clear prefix that is part of 
child storage key via main storage" ); - return (false, 0) + let maybe_cursor = Some(prefix.to_vec()); + return MultiRemovalResults { maybe_cursor, backend: 0, unique: 0, loops: 0 } } let to_remove = self @@ -237,19 +248,20 @@ impl Externalities for BasicExternalities { .cloned() .collect::>(); - let num_removed = to_remove.len(); + let count = to_remove.len() as u32; for key in to_remove { self.inner.top.remove(&key); } - (true, num_removed as u32) + MultiRemovalResults { maybe_cursor: None, backend: count, unique: count, loops: count } } fn clear_child_prefix( &mut self, child_info: &ChildInfo, prefix: &[u8], - _limit: Option, - ) -> (bool, u32) { + _maybe_limit: Option, + _maybe_cursor: Option<&[u8]>, + ) -> MultiRemovalResults { if let Some(child) = self.inner.children_default.get_mut(child_info.storage_key()) { let to_remove = child .data @@ -259,13 +271,13 @@ impl Externalities for BasicExternalities { .cloned() .collect::>(); - let num_removed = to_remove.len(); + let count = to_remove.len() as u32; for key in to_remove { child.data.remove(&key); } - (true, num_removed as u32) + MultiRemovalResults { maybe_cursor: None, backend: count, unique: count, loops: count } } else { - (true, 0) + MultiRemovalResults { maybe_cursor: None, backend: 0, unique: 0, loops: 0 } } } @@ -434,7 +446,7 @@ mod tests { ext.clear_child_storage(child_info, b"dog"); assert_eq!(ext.child_storage(child_info, b"dog"), None); - ext.kill_child_storage(child_info, None); + let _ = ext.kill_child_storage(child_info, None, None); assert_eq!(ext.child_storage(child_info, b"doe"), None); } @@ -456,8 +468,8 @@ mod tests { ], }); - let res = ext.kill_child_storage(child_info, None); - assert_eq!(res, (true, 3)); + let res = ext.kill_child_storage(child_info, None, None); + assert_eq!(res.deconstruct(), (None, 3, 3, 3)); } #[test] diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index e33569e2a1f67..1db0ec517015b 100644 --- 
a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -27,7 +27,7 @@ use sp_core::hexdisplay::HexDisplay; use sp_core::storage::{ well_known_keys::is_child_storage_key, ChildInfo, StateVersion, TrackedStorageKey, }; -use sp_externalities::{Extension, ExtensionStore, Externalities}; +use sp_externalities::{Extension, ExtensionStore, Externalities, MultiRemovalResults}; use sp_trie::{empty_child_trie_root, LayoutV1}; use crate::{log_error, trace, warn, StorageTransactionCache}; @@ -433,7 +433,12 @@ where self.overlay.set_child_storage(child_info, key, value); } - fn kill_child_storage(&mut self, child_info: &ChildInfo, limit: Option) -> (bool, u32) { + fn kill_child_storage( + &mut self, + child_info: &ChildInfo, + maybe_limit: Option, + maybe_cursor: Option<&[u8]>, + ) -> MultiRemovalResults { trace!( target: "state", method = "ChildKill", @@ -442,11 +447,18 @@ where ); let _guard = guard(); self.mark_dirty(); - self.overlay.clear_child_storage(child_info); - self.limit_remove_from_backend(Some(child_info), None, limit) + let overlay = self.overlay.clear_child_storage(child_info); + let (maybe_cursor, backend, loops) = + self.limit_remove_from_backend(Some(child_info), None, maybe_limit, maybe_cursor); + MultiRemovalResults { maybe_cursor, backend, unique: overlay + backend, loops } } - fn clear_prefix(&mut self, prefix: &[u8], limit: Option) -> (bool, u32) { + fn clear_prefix( + &mut self, + prefix: &[u8], + maybe_limit: Option, + maybe_cursor: Option<&[u8]>, + ) -> MultiRemovalResults { trace!( target: "state", method = "ClearPrefix", @@ -460,20 +472,23 @@ where target: "trie", "Refuse to directly clear prefix that is part or contains of child storage key", ); - return (false, 0) + return MultiRemovalResults { maybe_cursor: None, backend: 0, unique: 0, loops: 0 } } self.mark_dirty(); - self.overlay.clear_prefix(prefix); - self.limit_remove_from_backend(None, Some(prefix), limit) + let overlay = self.overlay.clear_prefix(prefix); + let 
(maybe_cursor, backend, loops) = + self.limit_remove_from_backend(None, Some(prefix), maybe_limit, maybe_cursor); + MultiRemovalResults { maybe_cursor, backend, unique: overlay + backend, loops } } fn clear_child_prefix( &mut self, child_info: &ChildInfo, prefix: &[u8], - limit: Option, - ) -> (bool, u32) { + maybe_limit: Option, + maybe_cursor: Option<&[u8]>, + ) -> MultiRemovalResults { trace!( target: "state", method = "ChildClearPrefix", @@ -484,8 +499,14 @@ where let _guard = guard(); self.mark_dirty(); - self.overlay.clear_child_prefix(child_info, prefix); - self.limit_remove_from_backend(Some(child_info), Some(prefix), limit) + let overlay = self.overlay.clear_child_prefix(child_info, prefix); + let (maybe_cursor, backend, loops) = self.limit_remove_from_backend( + Some(child_info), + Some(prefix), + maybe_limit, + maybe_cursor, + ); + MultiRemovalResults { maybe_cursor, backend, unique: overlay + backend, loops } } fn storage_append(&mut self, key: Vec, value: Vec) { @@ -728,45 +749,37 @@ where { fn limit_remove_from_backend( &mut self, - child_info: Option<&ChildInfo>, - prefix: Option<&[u8]>, - limit: Option, - ) -> (bool, u32) { - let mut num_deleted: u32 = 0; - - if let Some(limit) = limit { - let mut all_deleted = true; - self.backend.apply_to_keys_while(child_info, prefix, |key| { - if num_deleted == limit { - all_deleted = false; - return false - } - if let Some(num) = num_deleted.checked_add(1) { - num_deleted = num; - } else { - all_deleted = false; + maybe_child: Option<&ChildInfo>, + maybe_prefix: Option<&[u8]>, + maybe_limit: Option, + maybe_cursor: Option<&[u8]>, + ) -> (Option>, u32, u32) { + let mut delete_count: u32 = 0; + let mut loop_count: u32 = 0; + let mut maybe_next_key = None; + self.backend + .apply_to_keys_while(maybe_child, maybe_prefix, maybe_cursor, |key| { + if maybe_limit.map_or(false, |limit| loop_count == limit) { + maybe_next_key = Some(key.to_vec()); return false } - if let Some(child_info) = child_info { - 
self.overlay.set_child_storage(child_info, key.to_vec(), None); - } else { - self.overlay.set_storage(key.to_vec(), None); - } - true - }); - (all_deleted, num_deleted) - } else { - self.backend.apply_to_keys_while(child_info, prefix, |key| { - num_deleted = num_deleted.saturating_add(1); - if let Some(child_info) = child_info { - self.overlay.set_child_storage(child_info, key.to_vec(), None); - } else { - self.overlay.set_storage(key.to_vec(), None); + let overlay = match maybe_child { + Some(child_info) => self.overlay.child_storage(child_info, key), + None => self.overlay.storage(key), + }; + if !matches!(overlay, Some(None)) { + // not pending deletion from the backend - delete it. + if let Some(child_info) = maybe_child { + self.overlay.set_child_storage(child_info, key.to_vec(), None); + } else { + self.overlay.set_storage(key.to_vec(), None); + } + delete_count = delete_count.saturating_add(1); } + loop_count = loop_count.saturating_add(1); true }); - (true, num_deleted) - } + (maybe_next_key, delete_count, loop_count) } } @@ -1085,14 +1098,14 @@ mod tests { not_under_prefix.extend(b"path"); ext.set_storage(not_under_prefix.clone(), vec![10]); - ext.clear_prefix(&[], None); - ext.clear_prefix(&well_known_keys::CHILD_STORAGE_KEY_PREFIX[..4], None); + let _ = ext.clear_prefix(&[], None, None); + let _ = ext.clear_prefix(&well_known_keys::CHILD_STORAGE_KEY_PREFIX[..4], None, None); let mut under_prefix = well_known_keys::CHILD_STORAGE_KEY_PREFIX.to_vec(); under_prefix.extend(b"path"); - ext.clear_prefix(&well_known_keys::CHILD_STORAGE_KEY_PREFIX[..4], None); + let _ = ext.clear_prefix(&well_known_keys::CHILD_STORAGE_KEY_PREFIX[..4], None, None); assert_eq!(ext.child_storage(child_info, &[30]), Some(vec![40])); assert_eq!(ext.storage(not_under_prefix.as_slice()), Some(vec![10])); - ext.clear_prefix(¬_under_prefix[..5], None); + let _ = ext.clear_prefix(¬_under_prefix[..5], None, None); assert_eq!(ext.storage(not_under_prefix.as_slice()), None); } diff --git 
a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index e5b521588aa79..edc3db7a60e36 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -1348,6 +1348,7 @@ mod execution { mod tests { use super::{ext::Ext, *}; use crate::{execution::CallResult, in_memory_backend::new_in_mem_hash_key}; + use assert_matches::assert_matches; use codec::{Decode, Encode}; use sp_core::{ map, @@ -1572,7 +1573,7 @@ mod tests { { let mut cache = StorageTransactionCache::default(); let mut ext = Ext::new(&mut overlay, &mut cache, backend, None); - ext.clear_prefix(b"ab", None); + let _ = ext.clear_prefix(b"ab", None, None); } overlay.commit_transaction().unwrap(); @@ -1596,7 +1597,10 @@ mod tests { { let mut cache = StorageTransactionCache::default(); let mut ext = Ext::new(&mut overlay, &mut cache, backend, None); - assert_eq!((false, 1), ext.clear_prefix(b"ab", Some(1))); + assert_matches!( + ext.clear_prefix(b"ab", Some(1), None).deconstruct(), + (Some(_), 1, 3, 1) + ); } overlay.commit_transaction().unwrap(); @@ -1638,7 +1642,8 @@ mod tests { { let mut cache = StorageTransactionCache::default(); let mut ext = Ext::new(&mut overlay, &mut cache, &backend, None); - assert_eq!(ext.kill_child_storage(&child_info, Some(2)), (false, 2)); + let r = ext.kill_child_storage(&child_info, Some(2), None); + assert_matches!(r.deconstruct(), (Some(_), 2, 6, 2)); } assert_eq!( @@ -1673,14 +1678,37 @@ mod tests { let mut overlay = OverlayedChanges::default(); let mut cache = StorageTransactionCache::default(); let mut ext = Ext::new(&mut overlay, &mut cache, &backend, None); - assert_eq!(ext.kill_child_storage(&child_info, Some(0)), (false, 0)); - assert_eq!(ext.kill_child_storage(&child_info, Some(1)), (false, 1)); - assert_eq!(ext.kill_child_storage(&child_info, Some(2)), (false, 2)); - assert_eq!(ext.kill_child_storage(&child_info, Some(3)), (false, 3)); - assert_eq!(ext.kill_child_storage(&child_info, Some(4)), (true, 4)); - // Only 
4 items to remove - assert_eq!(ext.kill_child_storage(&child_info, Some(5)), (true, 4)); - assert_eq!(ext.kill_child_storage(&child_info, None), (true, 4)); + let r = ext.kill_child_storage(&child_info, Some(0), None).deconstruct(); + assert_matches!(r, (Some(_), 0, 0, 0)); + let r = ext + .kill_child_storage(&child_info, Some(1), r.0.as_ref().map(|x| &x[..])) + .deconstruct(); + assert_matches!(r, (Some(_), 1, 1, 1)); + let r = ext + .kill_child_storage(&child_info, Some(4), r.0.as_ref().map(|x| &x[..])) + .deconstruct(); + // Only 3 items remaining to remove + assert_matches!(r, (None, 3, 3, 3)); + let r = ext.kill_child_storage(&child_info, Some(1), None).deconstruct(); + assert_matches!(r, (Some(_), 0, 0, 1)); + } + + #[test] + fn limited_child_kill_off_by_one_works_without_limit() { + let child_info = ChildInfo::new_default(b"sub1"); + let initial: HashMap<_, BTreeMap<_, _>> = map![ + Some(child_info.clone()) => map![ + b"a".to_vec() => b"0".to_vec(), + b"b".to_vec() => b"1".to_vec(), + b"c".to_vec() => b"2".to_vec(), + b"d".to_vec() => b"3".to_vec() + ], + ]; + let backend = InMemoryBackend::::from((initial, StateVersion::default())); + let mut overlay = OverlayedChanges::default(); + let mut cache = StorageTransactionCache::default(); + let mut ext = Ext::new(&mut overlay, &mut cache, &backend, None); + assert_eq!(ext.kill_child_storage(&child_info, None, None).deconstruct(), (None, 4, 4, 4)); } #[test] @@ -1695,7 +1723,7 @@ mod tests { ext.set_child_storage(child_info, b"abc".to_vec(), b"def".to_vec()); assert_eq!(ext.child_storage(child_info, b"abc"), Some(b"def".to_vec())); - ext.kill_child_storage(child_info, None); + let _ = ext.kill_child_storage(child_info, None, None); assert_eq!(ext.child_storage(child_info, b"abc"), None); } diff --git a/primitives/state-machine/src/overlayed_changes/changeset.rs b/primitives/state-machine/src/overlayed_changes/changeset.rs index 9e7f6ffeddfd7..ae5d47f6bb943 100644 --- 
a/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -410,10 +410,15 @@ impl OverlayedChangeSet { &mut self, predicate: impl Fn(&[u8], &OverlayedValue) -> bool, at_extrinsic: Option, - ) { + ) -> u32 { + let mut count = 0; for (key, val) in self.changes.iter_mut().filter(|(k, v)| predicate(k, v)) { + if val.value_ref().is_some() { + count += 1; + } val.set(None, insert_dirty(&mut self.dirty_keys, key.clone()), at_extrinsic); } + count } /// Get the iterator over all changes that follow the supplied `key`. diff --git a/primitives/state-machine/src/overlayed_changes/mod.rs b/primitives/state-machine/src/overlayed_changes/mod.rs index 2161b343711c9..746599a6768e6 100644 --- a/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/primitives/state-machine/src/overlayed_changes/mod.rs @@ -299,7 +299,7 @@ impl OverlayedChanges { /// Clear child storage of given storage key. /// /// Can be rolled back or committed when called inside a transaction. - pub(crate) fn clear_child_storage(&mut self, child_info: &ChildInfo) { + pub(crate) fn clear_child_storage(&mut self, child_info: &ChildInfo) -> u32 { let extrinsic_index = self.extrinsic_index(); let storage_key = child_info.storage_key().to_vec(); let top = &self.top; @@ -309,20 +309,20 @@ impl OverlayedChanges { .or_insert_with(|| (top.spawn_child(), child_info.clone())); let updatable = info.try_update(child_info); debug_assert!(updatable); - changeset.clear_where(|_, _| true, extrinsic_index); + changeset.clear_where(|_, _| true, extrinsic_index) } /// Removes all key-value pairs which keys share the given prefix. /// /// Can be rolled back or committed when called inside a transaction. 
- pub(crate) fn clear_prefix(&mut self, prefix: &[u8]) { - self.top.clear_where(|key, _| key.starts_with(prefix), self.extrinsic_index()); + pub(crate) fn clear_prefix(&mut self, prefix: &[u8]) -> u32 { + self.top.clear_where(|key, _| key.starts_with(prefix), self.extrinsic_index()) } /// Removes all key-value pairs which keys share the given prefix. /// /// Can be rolled back or committed when called inside a transaction - pub(crate) fn clear_child_prefix(&mut self, child_info: &ChildInfo, prefix: &[u8]) { + pub(crate) fn clear_child_prefix(&mut self, child_info: &ChildInfo, prefix: &[u8]) -> u32 { let extrinsic_index = self.extrinsic_index(); let storage_key = child_info.storage_key().to_vec(); let top = &self.top; @@ -332,7 +332,7 @@ impl OverlayedChanges { .or_insert_with(|| (top.spawn_child(), child_info.clone())); let updatable = info.try_update(child_info); debug_assert!(updatable); - changeset.clear_where(|key, _| key.starts_with(prefix), extrinsic_index); + changeset.clear_where(|key, _| key.starts_with(prefix), extrinsic_index) } /// Returns the current nesting depth of the transaction stack. 
diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index b47e6c693a3e8..f5cf542908b10 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -288,9 +288,10 @@ where &self, child_info: Option<&ChildInfo>, prefix: Option<&[u8]>, + start_at: Option<&[u8]>, f: F, ) { - self.0.apply_to_keys_while(child_info, prefix, f) + self.0.apply_to_keys_while(child_info, prefix, start_at, f) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { diff --git a/primitives/state-machine/src/read_only.rs b/primitives/state-machine/src/read_only.rs index 3c3e779c32f3a..622915a2d0525 100644 --- a/primitives/state-machine/src/read_only.rs +++ b/primitives/state-machine/src/read_only.rs @@ -25,6 +25,7 @@ use sp_core::{ traits::Externalities, Blake2Hasher, }; +use sp_externalities::MultiRemovalResults; use std::{ any::{Any, TypeId}, marker::PhantomData, @@ -124,11 +125,21 @@ impl<'a, H: Hasher, B: 'a + Backend> Externalities for ReadOnlyExternalities< unimplemented!("place_child_storage not supported in ReadOnlyExternalities") } - fn kill_child_storage(&mut self, _child_info: &ChildInfo, _limit: Option) -> (bool, u32) { + fn kill_child_storage( + &mut self, + _child_info: &ChildInfo, + _maybe_limit: Option, + _maybe_cursor: Option<&[u8]>, + ) -> MultiRemovalResults { unimplemented!("kill_child_storage is not supported in ReadOnlyExternalities") } - fn clear_prefix(&mut self, _prefix: &[u8], _limit: Option) -> (bool, u32) { + fn clear_prefix( + &mut self, + _prefix: &[u8], + _maybe_limit: Option, + _maybe_cursor: Option<&[u8]>, + ) -> MultiRemovalResults { unimplemented!("clear_prefix is not supported in ReadOnlyExternalities") } @@ -136,8 +147,9 @@ impl<'a, H: Hasher, B: 'a + Backend> Externalities for ReadOnlyExternalities< &mut self, _child_info: &ChildInfo, _prefix: &[u8], - _limit: Option, - ) -> (bool, u32) { + _maybe_limit: Option, + _maybe_cursor: 
Option<&[u8]>, + ) -> MultiRemovalResults { unimplemented!("clear_child_prefix is not supported in ReadOnlyExternalities") } diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index bc462ae01ab26..57d4f0b4898eb 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -381,7 +381,10 @@ mod tests { { let mut ext = ext.ext(); - assert!(!ext.kill_child_storage(&child_info, Some(2)).0, "Should not delete all keys"); + assert!( + ext.kill_child_storage(&child_info, Some(2), None).maybe_cursor.is_some(), + "Should not delete all keys" + ); assert!(ext.child_storage(&child_info, &b"doe"[..]).is_none()); assert!(ext.child_storage(&child_info, &b"dog"[..]).is_none()); diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index c0a620120bff2..130b4bf178202 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -123,9 +123,10 @@ where &self, child_info: Option<&ChildInfo>, prefix: Option<&[u8]>, + start_at: Option<&[u8]>, f: F, ) { - self.essence.apply_to_keys_while(child_info, prefix, f) + self.essence.apply_to_keys_while(child_info, prefix, start_at, f) } fn for_child_keys_with_prefix( diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 11cac92efd2f4..c55a6b7e80481 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -267,7 +267,8 @@ where &self, child_info: Option<&ChildInfo>, prefix: Option<&[u8]>, - mut f: F, + start_at: Option<&[u8]>, + f: F, ) { let root = if let Some(child_info) = child_info.as_ref() { match self.child_root(child_info) { @@ -283,7 +284,7 @@ where self.root }; - self.trie_iter_key_inner(&root, prefix, |k| f(k), child_info) + self.trie_iter_key_inner(&root, prefix, f, child_info, start_at) } /// Execute given 
closure for all keys starting with prefix. @@ -311,6 +312,7 @@ where true }, Some(child_info), + None, ) } @@ -324,28 +326,31 @@ where true }, None, + None, ) } fn trie_iter_key_inner bool>( &self, root: &H::Out, - prefix: Option<&[u8]>, + maybe_prefix: Option<&[u8]>, mut f: F, child_info: Option<&ChildInfo>, + maybe_start_at: Option<&[u8]>, ) { let mut iter = move |db| -> sp_std::result::Result<(), Box>> { let trie = TrieDB::::new(db, root)?; - let iter = if let Some(prefix) = prefix.as_ref() { - TrieDBKeyIterator::new_prefixed(&trie, prefix)? - } else { - TrieDBKeyIterator::new(&trie)? - }; + let prefix = maybe_prefix.unwrap_or(&[]); + let iter = match maybe_start_at { + Some(start_at) => + TrieDBKeyIterator::new_prefixed_then_seek(&trie, prefix, start_at), + None => TrieDBKeyIterator::new_prefixed(&trie, prefix), + }?; for x in iter { let key = x?; - debug_assert!(prefix + debug_assert!(maybe_prefix .as_ref() .map(|prefix| key.starts_with(prefix)) .unwrap_or(true)); diff --git a/primitives/tasks/src/async_externalities.rs b/primitives/tasks/src/async_externalities.rs index 70e3437538e8f..008955a714b21 100644 --- a/primitives/tasks/src/async_externalities.rs +++ b/primitives/tasks/src/async_externalities.rs @@ -22,7 +22,7 @@ use sp_core::{ storage::{ChildInfo, StateVersion, TrackedStorageKey}, traits::{Externalities, RuntimeSpawn, RuntimeSpawnExt, SpawnNamed, TaskExecutorExt}, }; -use sp_externalities::{Extensions, ExternalitiesExt as _}; +use sp_externalities::{Extensions, ExternalitiesExt as _, MultiRemovalResults}; use std::any::{Any, TypeId}; /// Simple state-less externalities for use in async context. 
@@ -105,11 +105,21 @@ impl Externalities for AsyncExternalities { panic!("`place_child_storage`: should not be used in async externalities!") } - fn kill_child_storage(&mut self, _child_info: &ChildInfo, _limit: Option) -> (bool, u32) { + fn kill_child_storage( + &mut self, + _child_info: &ChildInfo, + _maybe_limit: Option, + _maybe_cursor: Option<&[u8]>, + ) -> MultiRemovalResults { panic!("`kill_child_storage`: should not be used in async externalities!") } - fn clear_prefix(&mut self, _prefix: &[u8], _limit: Option) -> (bool, u32) { + fn clear_prefix( + &mut self, + _prefix: &[u8], + _maybe_limit: Option, + _maybe_cursor: Option<&[u8]>, + ) -> MultiRemovalResults { panic!("`clear_prefix`: should not be used in async externalities!") } @@ -117,8 +127,9 @@ impl Externalities for AsyncExternalities { &mut self, _child_info: &ChildInfo, _prefix: &[u8], - _limit: Option, - ) -> (bool, u32) { + _maybe_limit: Option, + _maybe_cursor: Option<&[u8]>, + ) -> MultiRemovalResults { panic!("`clear_child_prefix`: should not be used in async externalities!") }