From e38998801e433ecc569ff6d58d1d0aa80eaff771 Mon Sep 17 00:00:00 2001 From: yjh Date: Mon, 18 Sep 2023 13:53:06 +0800 Subject: [PATCH 01/16] Executor: Remove `LegacyInstanceReuse` strategy (#1486) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It seems the old strategy has been deprecated for more than a year. So maybe it's time to clean up the old strategy for the wasm executor. --- polkadot address: 15ouFh2SHpGbHtDPsJ6cXQfes9Cx1gEFnJJsJVqPGzBSTudr --------- Co-authored-by: Bastian Köcher Co-authored-by: Koute --- .../pvf/prepare-worker/src/memory_stats.rs | 2 +- substrate/client/cli/src/arg_enums.rs | 8 - substrate/client/executor/benches/bench.rs | 7 - .../runtime_blob/data_segments_snapshot.rs | 87 ---------- .../src/runtime_blob/globals_snapshot.rs | 112 ------------- .../executor/common/src/runtime_blob/mod.rs | 4 - .../common/src/runtime_blob/runtime_blob.rs | 19 +-- .../executor/common/src/wasm_runtime.rs | 10 -- .../executor/src/integration_tests/linux.rs | 84 ---------- .../src/integration_tests/linux/smaps.rs | 82 ---------- .../executor/src/integration_tests/mod.rs | 11 -- .../client/executor/wasmtime/src/host.rs | 2 +- .../executor/wasmtime/src/instance_wrapper.rs | 104 +----------- .../client/executor/wasmtime/src/runtime.rs | 149 +----------------- .../client/executor/wasmtime/src/tests.rs | 19 +-- .../client/executor/wasmtime/src/util.rs | 30 +--- 16 files changed, 20 insertions(+), 710 deletions(-) delete mode 100644 substrate/client/executor/common/src/runtime_blob/data_segments_snapshot.rs delete mode 100644 substrate/client/executor/common/src/runtime_blob/globals_snapshot.rs delete mode 100644 substrate/client/executor/src/integration_tests/linux.rs delete mode 100644 substrate/client/executor/src/integration_tests/linux/smaps.rs diff --git a/polkadot/node/core/pvf/prepare-worker/src/memory_stats.rs b/polkadot/node/core/pvf/prepare-worker/src/memory_stats.rs index 7904dfa9cb88..c70ff56fc84d 100644 --- a/polkadot/node/core/pvf/prepare-worker/src/memory_stats.rs +++ b/polkadot/node/core/pvf/prepare-worker/src/memory_stats.rs @@ -151,7 +151,7 @@ pub mod memory_tracker { /// Module for dealing with the `ru_maxrss` (peak resident memory) stat from `getrusage`. /// /// NOTE: `getrusage` with the `RUSAGE_THREAD` parameter is only supported on Linux. `RUSAGE_SELF` -/// works on MacOS, but we need to get the max rss only for the preparation thread. Gettng it for +/// works on MacOS, but we need to get the max rss only for the preparation thread. Getting it for /// the current process would conflate the stats of previous jobs run by the process. #[cfg(target_os = "linux")] pub mod max_rss_stat { diff --git a/substrate/client/cli/src/arg_enums.rs b/substrate/client/cli/src/arg_enums.rs index 40d86fd97988..67acb82c2c30 100644 --- a/substrate/client/cli/src/arg_enums.rs +++ b/substrate/client/cli/src/arg_enums.rs @@ -38,12 +38,6 @@ pub enum WasmtimeInstantiationStrategy { /// Recreate the instance from scratch on every instantiation. Very slow. RecreateInstance, - - /// Legacy instance reuse mechanism. DEPRECATED. Will be removed in the future. - /// - /// Should only be used in case of encountering any issues with the new default - /// instantiation strategy. - LegacyInstanceReuse, } /// The default [`WasmtimeInstantiationStrategy`]. 
@@ -92,8 +86,6 @@ pub fn execution_method_from_cli( sc_service::config::WasmtimeInstantiationStrategy::Pooling, WasmtimeInstantiationStrategy::RecreateInstance => sc_service::config::WasmtimeInstantiationStrategy::RecreateInstance, - WasmtimeInstantiationStrategy::LegacyInstanceReuse => - sc_service::config::WasmtimeInstantiationStrategy::LegacyInstanceReuse, }, } } diff --git a/substrate/client/executor/benches/bench.rs b/substrate/client/executor/benches/bench.rs index 66a82a175221..86c769f88811 100644 --- a/substrate/client/executor/benches/bench.rs +++ b/substrate/client/executor/benches/bench.rs @@ -150,13 +150,6 @@ fn bench_call_instance(c: &mut Criterion) { let _ = env_logger::try_init(); let strategies = [ - ( - "legacy_instance_reuse", - Method::Compiled { - instantiation_strategy: InstantiationStrategy::LegacyInstanceReuse, - precompile: false, - }, - ), ( "recreate_instance_vanilla", Method::Compiled { diff --git a/substrate/client/executor/common/src/runtime_blob/data_segments_snapshot.rs b/substrate/client/executor/common/src/runtime_blob/data_segments_snapshot.rs deleted file mode 100644 index 3fd546ce4457..000000000000 --- a/substrate/client/executor/common/src/runtime_blob/data_segments_snapshot.rs +++ /dev/null @@ -1,87 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use super::RuntimeBlob; -use crate::error::{self, Error}; -use std::mem; -use wasm_instrument::parity_wasm::elements::Instruction; - -/// This is a snapshot of data segments specialzied for a particular instantiation. -/// -/// Note that this assumes that no mutable globals are used. -#[derive(Clone)] -pub struct DataSegmentsSnapshot { - /// The list of data segments represented by (offset, contents). - data_segments: Vec<(u32, Vec)>, -} - -impl DataSegmentsSnapshot { - /// Create a snapshot from the data segments from the module. - pub fn take(module: &RuntimeBlob) -> error::Result { - let data_segments = module - .data_segments() - .into_iter() - .map(|mut segment| { - // Just replace contents of the segment since the segments will be discarded later - // anyway. - let contents = mem::take(segment.value_mut()); - - let init_expr = match segment.offset() { - Some(offset) => offset.code(), - // Return if the segment is passive - None => return Err(Error::SharedMemUnsupported), - }; - - // [op, End] - if init_expr.len() != 2 { - return Err(Error::InitializerHasTooManyExpressions) - } - let offset = match &init_expr[0] { - Instruction::I32Const(v) => *v as u32, - Instruction::GetGlobal(_) => { - // In a valid wasm file, initializer expressions can only refer imported - // globals. - // - // At the moment of writing the Substrate Runtime Interface does not provide - // any globals. There is nothing that prevents us from supporting this - // if/when we gain those. 
- return Err(Error::ImportedGlobalsUnsupported) - }, - insn => return Err(Error::InvalidInitializerExpression(format!("{:?}", insn))), - }; - - Ok((offset, contents)) - }) - .collect::>>()?; - - Ok(Self { data_segments }) - } - - /// Apply the given snapshot to a linear memory. - /// - /// Linear memory interface is represented by a closure `memory_set`. - pub fn apply( - &self, - mut memory_set: impl FnMut(u32, &[u8]) -> Result<(), E>, - ) -> Result<(), E> { - for (offset, contents) in &self.data_segments { - memory_set(*offset, contents)?; - } - Ok(()) - } -} diff --git a/substrate/client/executor/common/src/runtime_blob/globals_snapshot.rs b/substrate/client/executor/common/src/runtime_blob/globals_snapshot.rs deleted file mode 100644 index 9ba6fc55e49c..000000000000 --- a/substrate/client/executor/common/src/runtime_blob/globals_snapshot.rs +++ /dev/null @@ -1,112 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use super::RuntimeBlob; - -/// Saved value of particular exported global. -struct SavedValue { - /// The handle of this global which can be used to refer to this global. - handle: Global, - /// The global value that was observed during the snapshot creation. - value: sp_wasm_interface::Value, -} - -/// An adapter for a wasm module instance that is focused on getting and setting globals. -pub trait InstanceGlobals { - /// A handle to a global which can be used to get or set a global variable. This is supposed to - /// be a lightweight handle, like an index or an Rc-like smart-pointer, which is cheap to clone. - type Global: Clone; - /// Get a handle to a global by it's export name. - /// - /// The requested export is must exist in the exported list, and it should be a mutable global. - fn get_global(&mut self, export_name: &str) -> Self::Global; - /// Get the current value of the global. - fn get_global_value(&mut self, global: &Self::Global) -> sp_wasm_interface::Value; - /// Update the current value of the global. - /// - /// The global behind the handle is guaranteed to be mutable and the value to be the same type - /// as the global. - fn set_global_value(&mut self, global: &Self::Global, value: sp_wasm_interface::Value); -} - -/// A set of exposed mutable globals. -/// -/// This is set of globals required to create a [`GlobalsSnapshot`] and that are collected from -/// a runtime blob that was instrumented by -/// [`RuntimeBlob::expose_mutable_globals`](super::RuntimeBlob::expose_mutable_globals`). - -/// If the code wasn't instrumented then it would be empty and snapshot would do nothing. -pub struct ExposedMutableGlobalsSet(Vec); - -impl ExposedMutableGlobalsSet { - /// Collect the set from the given runtime blob. See the struct documentation for details. 
- pub fn collect(runtime_blob: &RuntimeBlob) -> Self { - let global_names = - runtime_blob.exported_internal_global_names().map(ToOwned::to_owned).collect(); - Self(global_names) - } -} - -/// A snapshot of a global variables values. This snapshot can be later used for restoring the -/// values to the preserved state. -/// -/// Technically, a snapshot stores only values of mutable global variables. This is because -/// immutable global variables always have the same values. -/// -/// We take it from an instance rather from a module because the start function could potentially -/// change any of the mutable global values. -pub struct GlobalsSnapshot(Vec>); - -impl GlobalsSnapshot { - /// Take a snapshot of global variables for a given instance. - /// - /// # Panics - /// - /// This function panics if the instance doesn't correspond to the module from which the - /// [`ExposedMutableGlobalsSet`] was collected. - pub fn take( - mutable_globals: &ExposedMutableGlobalsSet, - instance: &mut Instance, - ) -> Self - where - Instance: InstanceGlobals, - { - let global_names = &mutable_globals.0; - let mut saved_values = Vec::with_capacity(global_names.len()); - - for global_name in global_names { - let handle = instance.get_global(global_name); - let value = instance.get_global_value(&handle); - saved_values.push(SavedValue { handle, value }); - } - - Self(saved_values) - } - - /// Apply the snapshot to the given instance. - /// - /// This instance must be the same that was used for creation of this snapshot. - pub fn apply(&self, instance: &mut Instance) - where - Instance: InstanceGlobals, - { - for saved_value in &self.0 { - instance.set_global_value(&saved_value.handle, saved_value.value); - } - } -} diff --git a/substrate/client/executor/common/src/runtime_blob/mod.rs b/substrate/client/executor/common/src/runtime_blob/mod.rs index 07a0945cc2b6..8261d07eda5e 100644 --- a/substrate/client/executor/common/src/runtime_blob/mod.rs +++ b/substrate/client/executor/common/src/runtime_blob/mod.rs @@ -46,10 +46,6 @@ //! is free of any floating point operations, which is a useful step towards making instances //! produced from such a module deterministic. -mod data_segments_snapshot; -mod globals_snapshot; mod runtime_blob; -pub use data_segments_snapshot::DataSegmentsSnapshot; -pub use globals_snapshot::{ExposedMutableGlobalsSet, GlobalsSnapshot, InstanceGlobals}; pub use runtime_blob::RuntimeBlob; diff --git a/substrate/client/executor/common/src/runtime_blob/runtime_blob.rs b/substrate/client/executor/common/src/runtime_blob/runtime_blob.rs index 24dc7e393a4b..becf9e219b0b 100644 --- a/substrate/client/executor/common/src/runtime_blob/runtime_blob.rs +++ b/substrate/client/executor/common/src/runtime_blob/runtime_blob.rs @@ -20,8 +20,8 @@ use crate::{error::WasmError, wasm_runtime::HeapAllocStrategy}; use wasm_instrument::{ export_mutable_globals, parity_wasm::elements::{ - deserialize_buffer, serialize, DataSegment, ExportEntry, External, Internal, MemorySection, - MemoryType, Module, Section, + deserialize_buffer, serialize, ExportEntry, External, Internal, MemorySection, MemoryType, + Module, Section, }, }; @@ -52,11 +52,6 @@ impl RuntimeBlob { Ok(Self { raw_module }) } - /// Extract the data segments from the given wasm code. - pub(super) fn data_segments(&self) -> Vec { - self.raw_module.data_section().map(|ds| ds.entries()).unwrap_or(&[]).to_vec() - } - /// The number of globals defined in locally in this module. 
pub fn declared_globals_count(&self) -> u32 { self.raw_module @@ -190,16 +185,6 @@ impl RuntimeBlob { Ok(()) } - /// Returns an iterator of all globals which were exported by [`expose_mutable_globals`]. - pub(super) fn exported_internal_global_names(&self) -> impl Iterator { - let exports = self.raw_module.export_section().map(|es| es.entries()).unwrap_or(&[]); - exports.iter().filter_map(|export| match export.internal() { - Internal::Global(_) if export.field().starts_with("exported_internal_global") => - Some(export.field()), - _ => None, - }) - } - /// Scans the wasm blob for the first section with the name that matches the given. Returns the /// contents of the custom section if found or `None` otherwise. pub fn custom_section_contents(&self, section_name: &str) -> Option<&[u8]> { diff --git a/substrate/client/executor/common/src/wasm_runtime.rs b/substrate/client/executor/common/src/wasm_runtime.rs index 5dac77e59fa7..d8e142b9d559 100644 --- a/substrate/client/executor/common/src/wasm_runtime.rs +++ b/substrate/client/executor/common/src/wasm_runtime.rs @@ -115,16 +115,6 @@ pub trait WasmInstance: Send { /// /// This method is only suitable for getting immutable globals. fn get_global_const(&mut self, name: &str) -> Result, Error>; - - /// **Testing Only**. This function returns the base address of the linear memory. - /// - /// This is meant to be the starting address of the memory mapped area for the linear memory. - /// - /// This function is intended only for a specific test that measures physical memory - /// consumption. - fn linear_memory_base_ptr(&self) -> Option<*const u8> { - None - } } /// Defines the heap pages allocation strategy the wasm runtime should use. diff --git a/substrate/client/executor/src/integration_tests/linux.rs b/substrate/client/executor/src/integration_tests/linux.rs deleted file mode 100644 index 68ac37e9011a..000000000000 --- a/substrate/client/executor/src/integration_tests/linux.rs +++ /dev/null @@ -1,84 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Tests that are only relevant for Linux. - -mod smaps; - -use super::mk_test_runtime; -use crate::WasmExecutionMethod; -use codec::Encode as _; -use sc_executor_common::wasm_runtime::DEFAULT_HEAP_ALLOC_STRATEGY; - -use self::smaps::Smaps; - -#[test] -fn memory_consumption_compiled() { - let _ = sp_tracing::try_init_simple(); - - if std::env::var("RUN_TEST").is_ok() { - memory_consumption(WasmExecutionMethod::Compiled { - instantiation_strategy: - sc_executor_wasmtime::InstantiationStrategy::LegacyInstanceReuse, - }); - } else { - // We need to run the test in isolation, to not getting interfered by the other tests. 
- let executable = std::env::current_exe().unwrap(); - let status = std::process::Command::new(executable) - .env("RUN_TEST", "1") - .args(&["--nocapture", "memory_consumption_compiled"]) - .status() - .unwrap(); - - assert!(status.success()); - } -} - -fn memory_consumption(wasm_method: WasmExecutionMethod) { - // This aims to see if linear memory stays backed by the physical memory after a runtime call. - // - // For that we make a series of runtime calls, probing the RSS for the VMA matching the linear - // memory. After the call we expect RSS to be equal to 0. - - let runtime = mk_test_runtime(wasm_method, DEFAULT_HEAP_ALLOC_STRATEGY); - - let mut instance = runtime.new_instance().unwrap(); - let heap_base = instance - .get_global_const("__heap_base") - .expect("`__heap_base` is valid") - .expect("`__heap_base` exists") - .as_i32() - .expect("`__heap_base` is an `i32`"); - - fn probe_rss(instance: &dyn sc_executor_common::wasm_runtime::WasmInstance) -> usize { - let base_addr = instance.linear_memory_base_ptr().unwrap() as usize; - Smaps::new().get_rss(base_addr).expect("failed to get rss") - } - - instance - .call_export("test_dirty_plenty_memory", &(heap_base as u32, 1u32).encode()) - .unwrap(); - let probe_1 = probe_rss(&*instance); - instance - .call_export("test_dirty_plenty_memory", &(heap_base as u32, 1024u32).encode()) - .unwrap(); - let probe_2 = probe_rss(&*instance); - - assert_eq!(probe_1, 0); - assert_eq!(probe_2, 0); -} diff --git a/substrate/client/executor/src/integration_tests/linux/smaps.rs b/substrate/client/executor/src/integration_tests/linux/smaps.rs deleted file mode 100644 index 1ac570dd8d5f..000000000000 --- a/substrate/client/executor/src/integration_tests/linux/smaps.rs +++ /dev/null @@ -1,82 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! A tool for extracting information about the memory consumption of the current process from -//! the procfs. 
- -use std::{collections::BTreeMap, ops::Range}; - -/// An interface to the /proc/self/smaps -/// -/// See docs about [procfs on kernel.org][procfs] -/// -/// [procfs]: https://www.kernel.org/doc/html/latest/filesystems/proc.html -pub struct Smaps(Vec<(Range, BTreeMap)>); - -impl Smaps { - pub fn new() -> Self { - let regex_start = regex::RegexBuilder::new("^([0-9a-f]+)-([0-9a-f]+)") - .multi_line(true) - .build() - .unwrap(); - let regex_kv = regex::RegexBuilder::new(r#"^([^:]+):\s*(\d+) kB"#) - .multi_line(true) - .build() - .unwrap(); - let smaps = std::fs::read_to_string("/proc/self/smaps").unwrap(); - let boundaries: Vec<_> = regex_start - .find_iter(&smaps) - .map(|matched| matched.start()) - .chain(std::iter::once(smaps.len())) - .collect(); - - let mut output = Vec::new(); - for window in boundaries.windows(2) { - let chunk = &smaps[window[0]..window[1]]; - let caps = regex_start.captures(chunk).unwrap(); - let start = usize::from_str_radix(caps.get(1).unwrap().as_str(), 16).unwrap(); - let end = usize::from_str_radix(caps.get(2).unwrap().as_str(), 16).unwrap(); - - let values = regex_kv - .captures_iter(chunk) - .map(|cap| { - let key = cap.get(1).unwrap().as_str().to_owned(); - let value = cap.get(2).unwrap().as_str().parse().unwrap(); - (key, value) - }) - .collect(); - - output.push((start..end, values)); - } - - Self(output) - } - - fn get_map(&self, addr: usize) -> &BTreeMap { - &self - .0 - .iter() - .find(|(range, _)| addr >= range.start && addr < range.end) - .unwrap() - .1 - } - - pub fn get_rss(&self, addr: usize) -> Option { - self.get_map(addr).get("Rss").cloned() - } -} diff --git a/substrate/client/executor/src/integration_tests/mod.rs b/substrate/client/executor/src/integration_tests/mod.rs index 37aed8eef96a..0bd080c24357 100644 --- a/substrate/client/executor/src/integration_tests/mod.rs +++ b/substrate/client/executor/src/integration_tests/mod.rs @@ -16,9 +16,6 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#[cfg(target_os = "linux")] -mod linux; - use assert_matches::assert_matches; use codec::{Decode, Encode}; use sc_executor_common::{ @@ -81,14 +78,6 @@ macro_rules! test_wasm_execution { instantiation_strategy: sc_executor_wasmtime::InstantiationStrategy::Pooling }); } - - #[test] - fn [<$method_name _compiled_legacy_instance_reuse>]() { - let _ = sp_tracing::try_init_simple(); - $method_name(WasmExecutionMethod::Compiled { - instantiation_strategy: sc_executor_wasmtime::InstantiationStrategy::LegacyInstanceReuse - }); - } } }; } diff --git a/substrate/client/executor/wasmtime/src/host.rs b/substrate/client/executor/wasmtime/src/host.rs index 9bd3ca3dade5..f8c78cbb660e 100644 --- a/substrate/client/executor/wasmtime/src/host.rs +++ b/substrate/client/executor/wasmtime/src/host.rs @@ -32,7 +32,7 @@ use crate::{instance_wrapper::MemoryWrapper, runtime::StoreData, util}; pub struct HostState { /// The allocator instance to keep track of allocated memory. /// - /// This is stored as an `Option` as we need to temporarly set this to `None` when we are + /// This is stored as an `Option` as we need to temporarily set this to `None` when we are /// allocating/deallocating memory. The problem being that we can only mutable access `caller` /// once. 
allocator: Option, diff --git a/substrate/client/executor/wasmtime/src/instance_wrapper.rs b/substrate/client/executor/wasmtime/src/instance_wrapper.rs index 6d319cce509e..acc799061c27 100644 --- a/substrate/client/executor/wasmtime/src/instance_wrapper.rs +++ b/substrate/client/executor/wasmtime/src/instance_wrapper.rs @@ -116,14 +116,14 @@ impl EntryPoint { pub(crate) struct MemoryWrapper<'a, C>(pub &'a wasmtime::Memory, pub &'a mut C); impl sc_allocator::Memory for MemoryWrapper<'_, C> { - fn with_access(&self, run: impl FnOnce(&[u8]) -> R) -> R { - run(self.0.data(&self.1)) - } - fn with_access_mut(&mut self, run: impl FnOnce(&mut [u8]) -> R) -> R { run(self.0.data_mut(&mut self.1)) } + fn with_access(&self, run: impl FnOnce(&[u8]) -> R) -> R { + run(self.0.data(&self.1)) + } + fn grow(&mut self, additional: u32) -> std::result::Result<(), ()> { self.0 .grow(&mut self.1, additional as u64) @@ -153,11 +153,6 @@ impl sc_allocator::Memory for MemoryWrapper<'_, C> { /// routines. pub struct InstanceWrapper { instance: Instance, - /// The memory instance of the `instance`. - /// - /// It is important to make sure that we don't make any copies of this to make it easier to - /// proof - memory: Memory, store: Store, } @@ -177,7 +172,7 @@ impl InstanceWrapper { store.data_mut().memory = Some(memory); store.data_mut().table = table; - Ok(InstanceWrapper { instance, memory, store }) + Ok(InstanceWrapper { instance, store }) } /// Resolves a substrate entrypoint by the given name. @@ -280,11 +275,6 @@ impl InstanceWrapper { _ => Err("Unknown value type".into()), } } - - /// Get a global with the given `name`. - pub fn get_global(&mut self, name: &str) -> Option { - self.instance.get_global(&mut self.store, name) - } } /// Extract linear memory instance from the given instance. @@ -311,76 +301,6 @@ fn get_table(instance: &Instance, ctx: &mut Store) -> Option { /// Functions related to memory. impl InstanceWrapper { - /// Returns the pointer to the first byte of the linear memory for this instance. - pub fn base_ptr(&self) -> *const u8 { - self.memory.data_ptr(&self.store) - } - - /// If possible removes physical backing from the allocated linear memory which - /// leads to returning the memory back to the system; this also zeroes the memory - /// as a side-effect. - pub fn decommit(&mut self) { - if self.memory.data_size(&self.store) == 0 { - return - } - - cfg_if::cfg_if! { - if #[cfg(target_os = "linux")] { - use std::sync::Once; - - unsafe { - let ptr = self.memory.data_ptr(&self.store); - let len = self.memory.data_size(&self.store); - - // Linux handles MADV_DONTNEED reliably. The result is that the given area - // is unmapped and will be zeroed on the next pagefault. - if libc::madvise(ptr as _, len, libc::MADV_DONTNEED) != 0 { - static LOGGED: Once = Once::new(); - LOGGED.call_once(|| { - log::warn!( - "madvise(MADV_DONTNEED) failed: {}", - std::io::Error::last_os_error(), - ); - }); - } else { - return; - } - } - } else if #[cfg(target_os = "macos")] { - use std::sync::Once; - - unsafe { - let ptr = self.memory.data_ptr(&self.store); - let len = self.memory.data_size(&self.store); - - // On MacOS we can simply overwrite memory mapping. 
- if libc::mmap( - ptr as _, - len, - libc::PROT_READ | libc::PROT_WRITE, - libc::MAP_FIXED | libc::MAP_PRIVATE | libc::MAP_ANONYMOUS, - -1, - 0, - ) == libc::MAP_FAILED { - static LOGGED: Once = Once::new(); - LOGGED.call_once(|| { - log::warn!( - "Failed to decommit WASM instance memory through mmap: {}", - std::io::Error::last_os_error(), - ); - }); - } else { - return; - } - } - } - } - - // If we're on an unsupported OS or the memory couldn't have been - // decommited for some reason then just manually zero it out. - self.memory.data_mut(self.store.as_context_mut()).fill(0); - } - pub(crate) fn store(&self) -> &Store { &self.store } @@ -389,17 +309,3 @@ impl InstanceWrapper { &mut self.store } } - -#[test] -fn decommit_works() { - let engine = wasmtime::Engine::default(); - let code = wat::parse_str("(module (memory (export \"memory\") 1 4))").unwrap(); - let module = wasmtime::Module::new(&engine, code).unwrap(); - let linker = wasmtime::Linker::new(&engine); - let instance_pre = linker.instantiate_pre(&module).unwrap(); - let mut wrapper = InstanceWrapper::new(&engine, &instance_pre).unwrap(); - unsafe { *wrapper.memory.data_ptr(&wrapper.store) = 42 }; - assert_eq!(unsafe { *wrapper.memory.data_ptr(&wrapper.store) }, 42); - wrapper.decommit(); - assert_eq!(unsafe { *wrapper.memory.data_ptr(&wrapper.store) }, 0); -} diff --git a/substrate/client/executor/wasmtime/src/runtime.rs b/substrate/client/executor/wasmtime/src/runtime.rs index 23b069870aa3..ae78137959be 100644 --- a/substrate/client/executor/wasmtime/src/runtime.rs +++ b/substrate/client/executor/wasmtime/src/runtime.rs @@ -27,9 +27,7 @@ use crate::{ use sc_allocator::{AllocationStats, FreeingBumpHeapAllocator}; use sc_executor_common::{ error::{Error, Result, WasmError}, - runtime_blob::{ - self, DataSegmentsSnapshot, ExposedMutableGlobalsSet, GlobalsSnapshot, RuntimeBlob, - }, + runtime_blob::RuntimeBlob, util::checked_range, wasm_runtime::{HeapAllocStrategy, InvokeMethod, WasmInstance, WasmModule}, }; @@ -69,17 +67,11 @@ impl StoreData { pub(crate) type Store = wasmtime::Store; enum Strategy { - LegacyInstanceReuse { - instance_wrapper: InstanceWrapper, - globals_snapshot: GlobalsSnapshot, - data_segments_snapshot: Arc, - heap_base: u32, - }, RecreateInstance(InstanceCreator), } struct InstanceCreator { - engine: wasmtime::Engine, + engine: Engine, instance_pre: Arc>, } @@ -89,40 +81,10 @@ impl InstanceCreator { } } -struct InstanceGlobals<'a> { - instance: &'a mut InstanceWrapper, -} - -impl<'a> runtime_blob::InstanceGlobals for InstanceGlobals<'a> { - type Global = wasmtime::Global; - - fn get_global(&mut self, export_name: &str) -> Self::Global { - self.instance - .get_global(export_name) - .expect("get_global is guaranteed to be called with an export name of a global; qed") - } - - fn get_global_value(&mut self, global: &Self::Global) -> Value { - util::from_wasmtime_val(global.get(&mut self.instance.store_mut())) - } - - fn set_global_value(&mut self, global: &Self::Global, value: Value) { - global.set(&mut self.instance.store_mut(), util::into_wasmtime_val(value)).expect( - "the value is guaranteed to be of the same value; the global is guaranteed to be mutable; qed", - ); - } -} - -/// Data required for creating instances with the fast instance reuse strategy. -struct InstanceSnapshotData { - mutable_globals: ExposedMutableGlobalsSet, - data_segments_snapshot: Arc, -} - /// A `WasmModule` implementation using wasmtime to compile the runtime module to machine code /// and execute the compiled code. 
pub struct WasmtimeRuntime { - engine: wasmtime::Engine, + engine: Engine, instance_pre: Arc>, instantiation_strategy: InternalInstantiationStrategy, } @@ -130,26 +92,6 @@ pub struct WasmtimeRuntime { impl WasmModule for WasmtimeRuntime { fn new_instance(&self) -> Result> { let strategy = match self.instantiation_strategy { - InternalInstantiationStrategy::LegacyInstanceReuse(ref snapshot_data) => { - let mut instance_wrapper = InstanceWrapper::new(&self.engine, &self.instance_pre)?; - let heap_base = instance_wrapper.extract_heap_base()?; - - // This function panics if the instance was created from a runtime blob different - // from which the mutable globals were collected. Here, it is easy to see that there - // is only a single runtime blob and thus it's the same that was used for both - // creating the instance and collecting the mutable globals. - let globals_snapshot = GlobalsSnapshot::take( - &snapshot_data.mutable_globals, - &mut InstanceGlobals { instance: &mut instance_wrapper }, - ); - - Strategy::LegacyInstanceReuse { - instance_wrapper, - globals_snapshot, - data_segments_snapshot: snapshot_data.data_segments_snapshot.clone(), - heap_base, - } - }, InternalInstantiationStrategy::Builtin => Strategy::RecreateInstance(InstanceCreator { engine: self.engine.clone(), instance_pre: self.instance_pre.clone(), @@ -174,39 +116,12 @@ impl WasmtimeInstance { allocation_stats: &mut Option, ) -> Result> { match &mut self.strategy { - Strategy::LegacyInstanceReuse { - ref mut instance_wrapper, - globals_snapshot, - data_segments_snapshot, - heap_base, - } => { - let entrypoint = instance_wrapper.resolve_entrypoint(method)?; - - data_segments_snapshot.apply(|offset, contents| { - util::write_memory_from( - instance_wrapper.store_mut(), - Pointer::new(offset), - contents, - ) - })?; - globals_snapshot.apply(&mut InstanceGlobals { instance: instance_wrapper }); - let allocator = FreeingBumpHeapAllocator::new(*heap_base); - - let result = - perform_call(data, instance_wrapper, entrypoint, allocator, allocation_stats); - - // Signal to the OS that we are done with the linear memory and that it can be - // reclaimed. - instance_wrapper.decommit(); - - result - }, Strategy::RecreateInstance(ref mut instance_creator) => { let mut instance_wrapper = instance_creator.instantiate()?; let heap_base = instance_wrapper.extract_heap_base()?; let entrypoint = instance_wrapper.resolve_entrypoint(method)?; - let allocator = FreeingBumpHeapAllocator::new(heap_base); + perform_call(data, &mut instance_wrapper, entrypoint, allocator, allocation_stats) }, } @@ -226,24 +141,10 @@ impl WasmInstance for WasmtimeInstance { fn get_global_const(&mut self, name: &str) -> Result> { match &mut self.strategy { - Strategy::LegacyInstanceReuse { instance_wrapper, .. } => - instance_wrapper.get_global_val(name), Strategy::RecreateInstance(ref mut instance_creator) => instance_creator.instantiate()?.get_global_val(name), } } - - fn linear_memory_base_ptr(&self) -> Option<*const u8> { - match &self.strategy { - Strategy::RecreateInstance(_) => { - // We do not keep the wasm instance around, therefore there is no linear memory - // associated with it. - None - }, - Strategy::LegacyInstanceReuse { instance_wrapper, .. } => - Some(instance_wrapper.base_ptr()), - } - } } /// Prepare a directory structure and a config file to enable wasmtime caching. 
@@ -338,7 +239,6 @@ fn common_config(semantics: &Semantics) -> std::result::Result (true, false), InstantiationStrategy::RecreateInstanceCopyOnWrite => (false, true), InstantiationStrategy::RecreateInstance => (false, false), - InstantiationStrategy::LegacyInstanceReuse => (false, false), }; const WASM_PAGE_SIZE: u64 = 65536; @@ -409,7 +309,7 @@ fn common_config(semantics: &Semantics) -> std::result::Result { - let data_segments_snapshot = - DataSegmentsSnapshot::take(&blob).map_err(|e| { - WasmError::Other(format!("cannot take data segments snapshot: {}", e)) - })?; - let data_segments_snapshot = Arc::new(data_segments_snapshot); - let mutable_globals = ExposedMutableGlobalsSet::collect(&blob); - - ( - module, - InternalInstantiationStrategy::LegacyInstanceReuse(InstanceSnapshotData { - data_segments_snapshot, - mutable_globals, - }), - ) - }, InstantiationStrategy::Pooling | InstantiationStrategy::PoolingCopyOnWrite | InstantiationStrategy::RecreateInstance | @@ -679,12 +559,6 @@ where } }, CodeSupplyMode::Precompiled(compiled_artifact_path) => { - if let InstantiationStrategy::LegacyInstanceReuse = - config.semantics.instantiation_strategy - { - return Err(WasmError::Other("the legacy instance reuse instantiation strategy is incompatible with precompiled modules".into())); - } - // SAFETY: The unsafety of `deserialize_file` is covered by this function. The // responsibilities to maintain the invariants are passed to the caller. // @@ -695,12 +569,6 @@ where (module, InternalInstantiationStrategy::Builtin) }, CodeSupplyMode::PrecompiledBytes(compiled_artifact_bytes) => { - if let InstantiationStrategy::LegacyInstanceReuse = - config.semantics.instantiation_strategy - { - return Err(WasmError::Other("the legacy instance reuse instantiation strategy is incompatible with precompiled modules".into())); - } - // SAFETY: The unsafety of `deserialize` is covered by this function. The // responsibilities to maintain the invariants are passed to the caller. // @@ -730,13 +598,6 @@ fn prepare_blob_for_compilation( blob = blob.inject_stack_depth_metering(logical_max)?; } - if let InstantiationStrategy::LegacyInstanceReuse = semantics.instantiation_strategy { - // When this strategy is used this must be called after all other passes which may introduce - // new global variables, otherwise they will not be reset when we call into the runtime - // again. - blob.expose_mutable_globals(); - } - // We don't actually need the memory to be imported so we can just convert any memory // import into an export with impunity. This simplifies our code since `wasmtime` will // now automatically take care of creating the memory for us, and it is also necessary diff --git a/substrate/client/executor/wasmtime/src/tests.rs b/substrate/client/executor/wasmtime/src/tests.rs index 65093687822d..e185754b0769 100644 --- a/substrate/client/executor/wasmtime/src/tests.rs +++ b/substrate/client/executor/wasmtime/src/tests.rs @@ -30,7 +30,7 @@ type HostFunctions = sp_io::SubstrateHostFunctions; #[macro_export] macro_rules! test_wasm_execution { - (@no_legacy_instance_reuse $method_name:ident) => { + ($method_name:ident) => { paste::item! { #[test] fn [<$method_name _recreate_instance_cow>]() { @@ -61,19 +61,6 @@ macro_rules! test_wasm_execution { } } }; - - ($method_name:ident) => { - test_wasm_execution!(@no_legacy_instance_reuse $method_name); - - paste::item! 
{ - #[test] - fn [<$method_name _legacy_instance_reuse>]() { - $method_name( - InstantiationStrategy::LegacyInstanceReuse - ); - } - } - }; } struct RuntimeBuilder { @@ -330,14 +317,14 @@ fn test_max_memory_pages_exported_memory_without_precompilation( test_max_memory_pages(instantiation_strategy, false, false); } -test_wasm_execution!(@no_legacy_instance_reuse test_max_memory_pages_imported_memory_with_precompilation); +test_wasm_execution!(test_max_memory_pages_imported_memory_with_precompilation); fn test_max_memory_pages_imported_memory_with_precompilation( instantiation_strategy: InstantiationStrategy, ) { test_max_memory_pages(instantiation_strategy, true, true); } -test_wasm_execution!(@no_legacy_instance_reuse test_max_memory_pages_exported_memory_with_precompilation); +test_wasm_execution!(test_max_memory_pages_exported_memory_with_precompilation); fn test_max_memory_pages_exported_memory_with_precompilation( instantiation_strategy: InstantiationStrategy, ) { diff --git a/substrate/client/executor/wasmtime/src/util.rs b/substrate/client/executor/wasmtime/src/util.rs index c38d969ce9dc..7af554c35e1b 100644 --- a/substrate/client/executor/wasmtime/src/util.rs +++ b/substrate/client/executor/wasmtime/src/util.rs @@ -21,33 +21,9 @@ use sc_executor_common::{ error::{Error, Result}, util::checked_range, }; -use sp_wasm_interface::{Pointer, Value}; +use sp_wasm_interface::Pointer; use wasmtime::{AsContext, AsContextMut}; -/// Converts a [`wasmtime::Val`] into a substrate runtime interface [`Value`]. -/// -/// Panics if the given value doesn't have a corresponding variant in `Value`. -pub fn from_wasmtime_val(val: wasmtime::Val) -> Value { - match val { - wasmtime::Val::I32(v) => Value::I32(v), - wasmtime::Val::I64(v) => Value::I64(v), - wasmtime::Val::F32(f_bits) => Value::F32(f_bits), - wasmtime::Val::F64(f_bits) => Value::F64(f_bits), - v => panic!("Given value type is unsupported by Substrate: {:?}", v), - } -} - -/// Converts a sp_wasm_interface's [`Value`] into the corresponding variant in wasmtime's -/// [`wasmtime::Val`]. -pub fn into_wasmtime_val(value: Value) -> wasmtime::Val { - match value { - Value::I32(v) => wasmtime::Val::I32(v), - Value::I64(v) => wasmtime::Val::I64(v), - Value::F32(f_bits) => wasmtime::Val::F32(f_bits), - Value::F64(f_bits) => wasmtime::Val::F64(f_bits), - } -} - /// Read data from the instance memory into a slice. /// /// Returns an error if the read would go out of the memory bounds. @@ -140,8 +116,8 @@ pub(crate) fn replace_strategy_if_broken(strategy: &mut InstantiationStrategy) { // These strategies require a working `madvise` to be sound. InstantiationStrategy::PoolingCopyOnWrite => InstantiationStrategy::Pooling, - InstantiationStrategy::RecreateInstanceCopyOnWrite | - InstantiationStrategy::LegacyInstanceReuse => InstantiationStrategy::RecreateInstance, + InstantiationStrategy::RecreateInstanceCopyOnWrite => + InstantiationStrategy::RecreateInstance, }; use std::sync::OnceLock; From a8e82a365e2a5f57a3bd51cb293dd6fbec4bf4f5 Mon Sep 17 00:00:00 2001 From: Muharem Ismailov Date: Mon, 18 Sep 2023 11:04:47 +0200 Subject: [PATCH 02/16] xcm-builder: PayOverXcm supports fallible convertors for asset kind and beneficiary conversion (#1572) `PayOverXcm` type accepts two converters to transform the `AssetKind` and `Beneficiary` parameter types into recognized `xcm` types. In this PR, we've modified the bounds for these converters, transitioning from `Convert` to `TryConvert`. 
One such use case for this adjustment is when dealing with versioned xcm types for `AssetKind` and `Beneficiary`. These types might not be convertible to the latest xcm version, hence the need for fallible conversion. This change is required for https://github.com/paritytech/polkadot-sdk/pull/1333 --- .../xcm/xcm-builder/src/location_conversion.rs | 8 ++++---- polkadot/xcm/xcm-builder/src/pay.rs | 18 ++++++++++-------- polkadot/xcm/xcm-builder/src/tests/pay/pay.rs | 6 +++--- 3 files changed, 17 insertions(+), 15 deletions(-) diff --git a/polkadot/xcm/xcm-builder/src/location_conversion.rs b/polkadot/xcm/xcm-builder/src/location_conversion.rs index 26b48fc88adc..ec23116e0e82 100644 --- a/polkadot/xcm/xcm-builder/src/location_conversion.rs +++ b/polkadot/xcm/xcm-builder/src/location_conversion.rs @@ -18,7 +18,7 @@ use crate::universal_exports::ensure_is_remote; use frame_support::traits::Get; use parity_scale_codec::{Compact, Decode, Encode}; use sp_io::hashing::blake2_256; -use sp_runtime::traits::{AccountIdConversion, Convert, TrailingZeroInput}; +use sp_runtime::traits::{AccountIdConversion, TrailingZeroInput, TryConvert}; use sp_std::{marker::PhantomData, prelude::*}; use xcm::latest::prelude::*; use xcm_executor::traits::ConvertLocation; @@ -322,10 +322,10 @@ impl>, AccountId: From<[u8; 32]> + Into<[u8; 32]> /// network (provided by `Network`) and the `AccountId`'s `[u8; 32]` datum for the `id`. pub struct AliasesIntoAccountId32(PhantomData<(Network, AccountId)>); impl<'a, Network: Get>, AccountId: Clone + Into<[u8; 32]> + Clone> - Convert<&'a AccountId, MultiLocation> for AliasesIntoAccountId32 + TryConvert<&'a AccountId, MultiLocation> for AliasesIntoAccountId32 { - fn convert(who: &AccountId) -> MultiLocation { - AccountId32 { network: Network::get(), id: who.clone().into() }.into() + fn try_convert(who: &AccountId) -> Result { + Ok(AccountId32 { network: Network::get(), id: who.clone().into() }.into()) } } diff --git a/polkadot/xcm/xcm-builder/src/pay.rs b/polkadot/xcm/xcm-builder/src/pay.rs index 39e09e056772..0f3a622f4ece 100644 --- a/polkadot/xcm/xcm-builder/src/pay.rs +++ b/polkadot/xcm/xcm-builder/src/pay.rs @@ -20,7 +20,7 @@ use frame_support::traits::{ tokens::{Pay, PaymentStatus}, Get, }; -use sp_runtime::traits::Convert; +use sp_runtime::traits::TryConvert; use sp_std::{marker::PhantomData, vec}; use xcm::{opaque::lts::Weight, prelude::*}; use xcm_executor::traits::{QueryHandler, QueryResponseStatus}; @@ -71,8 +71,8 @@ impl< Timeout: Get, Beneficiary: Clone, AssetKind, - AssetKindToLocatableAsset: Convert, - BeneficiaryRefToLocation: for<'a> Convert<&'a Beneficiary, MultiLocation>, + AssetKindToLocatableAsset: TryConvert, + BeneficiaryRefToLocation: for<'a> TryConvert<&'a Beneficiary, MultiLocation>, > Pay for PayOverXcm< Interior, @@ -96,12 +96,14 @@ impl< asset_kind: Self::AssetKind, amount: Self::Balance, ) -> Result { - let locatable = AssetKindToLocatableAsset::convert(asset_kind); + let locatable = AssetKindToLocatableAsset::try_convert(asset_kind) + .map_err(|_| xcm::latest::Error::InvalidLocation)?; let LocatableAssetId { asset_id, location: asset_location } = locatable; let destination = Querier::UniversalLocation::get() .invert_target(&asset_location) .map_err(|()| Self::Error::LocationNotInvertible)?; - let beneficiary = BeneficiaryRefToLocation::convert(&who); + let beneficiary = BeneficiaryRefToLocation::try_convert(&who) + .map_err(|_| xcm::latest::Error::InvalidLocation)?; let query_id = Querier::new_query(asset_location, Timeout::get(), Interior::get()); 
@@ -196,10 +198,10 @@ pub struct LocatableAssetId { /// Adapter `struct` which implements a conversion from any `AssetKind` into a [`LocatableAssetId`] /// value using a fixed `Location` for the `location` field. pub struct FixedLocation(sp_std::marker::PhantomData); -impl, AssetKind: Into> Convert +impl, AssetKind: Into> TryConvert for FixedLocation { - fn convert(value: AssetKind) -> LocatableAssetId { - LocatableAssetId { asset_id: value.into(), location: Location::get() } + fn try_convert(value: AssetKind) -> Result { + Ok(LocatableAssetId { asset_id: value.into(), location: Location::get() }) } } diff --git a/polkadot/xcm/xcm-builder/src/tests/pay/pay.rs b/polkadot/xcm/xcm-builder/src/tests/pay/pay.rs index 28b2feec0c23..491a2bcef7a0 100644 --- a/polkadot/xcm/xcm-builder/src/tests/pay/pay.rs +++ b/polkadot/xcm/xcm-builder/src/tests/pay/pay.rs @@ -29,9 +29,9 @@ pub struct AssetKind { } pub struct LocatableAssetKindConverter; -impl sp_runtime::traits::Convert for LocatableAssetKindConverter { - fn convert(value: AssetKind) -> LocatableAssetId { - LocatableAssetId { asset_id: value.asset_id, location: value.destination } +impl sp_runtime::traits::TryConvert for LocatableAssetKindConverter { + fn try_convert(value: AssetKind) -> Result { + Ok(LocatableAssetId { asset_id: value.asset_id, location: value.destination }) } } From 1d5a9d25e23df2a641b83b839c0a1a88b2e6b5ee Mon Sep 17 00:00:00 2001 From: Sacha Lansky Date: Mon, 18 Sep 2023 11:05:12 +0200 Subject: [PATCH 03/16] [improve docs] Example pallet crate and Basic Example pallet (#1546) This fixes the broken links in the crate level documentation of the Examples crate. It also updates the documentation for the Basic Example pallet by removing the template for documenting a pallet (we now have [this](https://github.com/paritytech/polkadot-sdk/blob/master/docs/DOCUMENTATION_GUIDELINE.md) to refer to instead). Note: I found it unnecessary to provide a link to the doc guidelines as I don't think this would be where someone should discover them. I also want to flag some ideas that came while making these minor improvements in [this issue](https://github.com/paritytech/polkadot-sdk-docs/issues/27) (for a subsequent PR) as part of ongoing docs work. --- substrate/frame/examples/Cargo.toml | 2 +- substrate/frame/examples/basic/src/lib.rs | 267 ++-------------------- substrate/frame/examples/src/lib.rs | 29 +-- 3 files changed, 42 insertions(+), 256 deletions(-) diff --git a/substrate/frame/examples/Cargo.toml b/substrate/frame/examples/Cargo.toml index b072416b6121..9c47d7442111 100644 --- a/substrate/frame/examples/Cargo.toml +++ b/substrate/frame/examples/Cargo.toml @@ -6,7 +6,7 @@ edition.workspace = true license = "Apache-2.0" homepage = "https://substrate.io" repository.workspace = true -description = "The single package with various examples for frame pallets" +description = "The single package with examples of various types of FRAME pallets" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/substrate/frame/examples/basic/src/lib.rs b/substrate/frame/examples/basic/src/lib.rs index 426e9b7ec648..31d20e07f5f7 100644 --- a/substrate/frame/examples/basic/src/lib.rs +++ b/substrate/frame/examples/basic/src/lib.rs @@ -15,258 +15,40 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! //! # Basic Example Pallet //! -//! -//! The Example: A simple example of a FRAME pallet demonstrating -//! concepts, APIs and structures common to most FRAME runtimes. 
-//! -//! Run `cargo doc --package pallet-example-basic --open` to view this pallet's documentation. +//! A pallet demonstrating concepts, APIs and structures common to most FRAME runtimes. //! //! **This pallet serves as an example and is not meant to be used in production.** //! -//! ### Documentation Guidelines: -//! -//! -//!
    -//!
  • Documentation comments (i.e. /// comment) - should -//! accompany pallet functions and be restricted to the pallet interface, -//! not the internals of the pallet implementation. Only state inputs, -//! outputs, and a brief description that mentions whether calling it -//! requires root, but without repeating the source code details. -//! Capitalize the first word of each documentation comment and end it with -//! a full stop. See -//! Generic example of annotating source code with documentation comments
  • -//! -//!
  • Self-documenting code - Try to refactor code to be self-documenting.
  • -//! -//!
  • Code comments - Supplement complex code with a brief explanation, not every line of -//! code.
  • -//! -//!
  • Identifiers - surround by backticks (i.e. INHERENT_IDENTIFIER, -//! InherentType, u64)
  • -//! -//!
  • Usage scenarios - should be simple doctests. The compiler should ensure they stay -//! valid.
  • -//! -//!
  • Extended tutorials - should be moved to external files and refer to.
  • -//! -//!
  • Mandatory - include all of the sections/subsections where MUST is specified.
  • -//! -//!
  • Optional - optionally include sections/subsections where CAN is specified.
  • -//!
-//! -//! ### Documentation Template:
-//! -//! Copy and paste this template from frame/examples/basic/src/lib.rs into file -//! `frame//src/lib.rs` of your own custom pallet and complete it. -//!

-//! // Add heading with custom pallet name
-//!
-//! \#  Pallet
-//!
-//! // Add simple description
-//!
-//! // Include the following links that shows what trait needs to be implemented to use the pallet
-//! // and the supported dispatchables that are documented in the Call enum.
-//!
-//! - \[`Config`]
-//! - \[`Call`]
-//! - \[`Pallet`]
-//!
-//! \## Overview
-//!
-//! 
-//! // Short description of pallet's purpose.
-//! // Links to Traits that should be implemented.
-//! // What this pallet is for.
-//! // What functionality the pallet provides.
-//! // When to use the pallet (use case examples).
-//! // How it is used.
-//! // Inputs it uses and the source of each input.
-//! // Outputs it produces.
-//!
-//! 
-//! 
-//!
-//! \## Terminology
-//!
-//! // Add terminology used in the custom pallet. Include concepts, storage items, or actions that
-//! you think // deserve to be noted to give context to the rest of the documentation or pallet
-//! usage. The author needs to // use some judgment about what is included. We don't want a list of
-//! every storage item nor types - the user // can go to the code for that. For example, "transfer
-//! fee" is obvious and should not be included, but // "free balance" and "reserved balance" should
-//! be noted to give context to the pallet. // Please do not link to outside resources. The
-//! reference docs should be the ultimate source of truth.
-//!
-//! 
-//!
-//! \## Goals
-//!
-//! // Add goals that the custom pallet is designed to achieve.
-//!
-//! 
-//!
-//! \### Scenarios
-//!
-//! 
-//!
-//! \#### 
-//!
-//! // Describe requirements prior to interacting with the custom pallet.
-//! // Describe the process of interacting with the custom pallet for this scenario and public API
-//! functions used.
-//!
-//! \## Interface
-//!
-//! \### Supported Origins
-//!
-//! // What origins are used and supported in this pallet (root, signed, none)
-//! // i.e. root when \`ensure_root\` used
-//! // i.e. none when \`ensure_none\` used
-//! // i.e. signed when \`ensure_signed\` used
-//!
-//! \`inherent\` 
-//!
-//! 
-//! 
-//!
-//! \### Types
-//!
-//! // Type aliases. Include any associated types and where the user would typically define them.
-//!
-//! \`ExampleType\` 
-//!
-//! 
-//!
-//! // Reference documentation of aspects such as `storageItems` and `dispatchable` functions should
-//! // only be included in the  Rustdocs for Substrate and not repeated in the
-//! // README file.
-//!
-//! \### Dispatchable Functions
-//!
-//! 
-//!
-//! // A brief description of dispatchable functions and a link to the rustdoc with their actual
-//! documentation.
-//!
-//! // MUST have link to Call enum
-//! // MUST have origin information included in function doc
-//! // CAN have more info up to the user
-//!
-//! \### Public Functions
-//!
-//! 
-//!
-//! // A link to the rustdoc and any notes about usage in the pallet, not for specific functions.
-//! // For example, in the Balances Pallet: "Note that when using the publicly exposed functions,
-//! // you (the runtime developer) are responsible for implementing any necessary checks
-//! // (e.g. that the sender is the signer) before calling a function that will affect storage."
-//!
-//! 
-//!
-//! // It is up to the writer of the respective pallet (with respect to how much information to
-//! provide).
-//!
-//! \#### Public Inspection functions - Immutable (getters)
-//!
-//! // Insert a subheading for each getter function signature
-//!
-//! \##### \`example_getter_name()\`
-//!
-//! // What it returns
-//! // Why, when, and how often to call it
-//! // When it could panic or error
-//! // When safety issues to consider
-//!
-//! \#### Public Mutable functions (changing state)
-//!
-//! // Insert a subheading for each setter function signature
-//!
-//! \##### \`example_setter_name(origin, parameter_name: T::ExampleType)\`
-//!
-//! // What state it changes
-//! // Why, when, and how often to call it
-//! // When it could panic or error
-//! // When safety issues to consider
-//! // What parameter values are valid and why
-//!
-//! \### Storage Items
-//!
-//! // Explain any storage items included in this pallet
-//!
-//! \### Digest Items
-//!
-//! // Explain any digest items included in this pallet
-//!
-//! \### Inherent Data
-//!
-//! // Explain what inherent data (if any) is defined in the pallet and any other related types
-//!
-//! \### Events:
-//!
-//! // Insert events for this pallet if any
-//!
-//! \### Errors:
-//!
-//! // Explain what generates errors
-//!
-//! \## Usage
-//!
-//! // Insert 2-3 examples of usage and code snippets that show how to
-//! // use  Pallet in a custom pallet.
-//!
-//! \### Prerequisites
-//!
-//! // Show how to include necessary imports for  and derive
-//! // your pallet configuration trait with the `INSERT_CUSTOM_PALLET_NAME` trait.
-//!
-//! \```rust
-//! use ;
-//!
-//! pub trait Config: ::Config { }
-//! \```
-//!
-//! \### Simple Code Snippet
-//!
-//! // Show a simple example (e.g. how to query a public getter function of
-//! )
-//!
-//! \### Example from FRAME
-//!
-//! // Show a usage example in an actual runtime
-//!
-//! // See:
-//! // - Substrate TCR 
-//! // - Substrate Kitties 
-//!
-//! \## Genesis Config
-//!
-//! 
-//!
-//! \## Dependencies
+//! > Made with *Substrate*, for *Polkadot*.
 //!
-//! // Dependencies on other FRAME pallets and the genesis config should be mentioned,
-//! // but not the Rust Standard Library.
-//! // Genesis configuration modifications that may be made to incorporate this pallet
-//! // Interaction with other pallets
+//! [![github]](https://github.com/paritytech/polkadot-sdk/tree/master/substrate/frame/examples/basic)
+//! [![polkadot]](https://polkadot.network)
 //!
-//! 
+//! [polkadot]: https://img.shields.io/badge/polkadot-E6007A?style=for-the-badge&logo=polkadot&logoColor=white
+//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github
 //!
-//! \## Related Pallets
+//! ## Pallet API
 //!
-//! // Interaction with other pallets in the form of a bullet point list
+//! See the [`pallet`] module for more information about the interfaces this pallet exposes,
+//! including its configuration trait, dispatchables, storage items, events and errors.
 //!
-//! \## References
+//! ## Overview
 //!
-//! 
+//! This pallet provides basic examples of using:
 //!
-//! // Links to reference material, if applicable. For example, Phragmen, W3F research, etc.
-//! // that the implementation is based on.
-//! 

+//! - A custom weight calculator able to classify a call's dispatch class (see: +//! [`frame_support::dispatch::DispatchClass`]) +//! - Pallet hooks to implement some custom logic that's executed before and after a block is +//! imported (see: [`frame_support::traits::Hooks`]) +//! - Inherited weight annotation for pallet calls, used to create less repetition for calls that +//! use the [`Config::WeightInfo`] trait to calculate call weights. This can also be overridden, +//! as demonstrated by [`Call::set_dummy`]. +//! - A private function that performs a storage update. +//! - A simple signed extension implementation (see: [`sp_runtime::traits::SignedExtension`]) which +//! increases the priority of the [`Call::set_dummy`] if it's present and drops any transaction +//! with an encoded length higher than 200 bytes. // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] @@ -381,7 +163,8 @@ pub mod pallet { #[pallet::pallet] pub struct Pallet(_); - // Pallet implements [`Hooks`] trait to define some logic to execute in some context. + // This pallet implements the [`frame_support::traits::Hooks`] trait to define some logic to + // execute in some context. #[pallet::hooks] impl Hooks> for Pallet { // `on_initialize` is executed at the beginning of the block before any extrinsic are @@ -686,7 +469,7 @@ impl Pallet { // sender of the transaction (if signed) are also provided. // // The full list of hooks that can be added to a signed extension can be found -// [here](https://crates.parity.io/sp_runtime/traits/trait.SignedExtension.html). +// [here](https://paritytech.github.io/polkadot-sdk/master/sp_runtime/traits/trait.SignedExtension.html). // // The signed extensions are aggregated in the runtime file of a substrate chain. All extensions // should be aggregated in a tuple and passed to the `CheckedExtrinsic` and `UncheckedExtrinsic` diff --git a/substrate/frame/examples/src/lib.rs b/substrate/frame/examples/src/lib.rs index d1cd32bb50f2..a7084fc6ef9b 100644 --- a/substrate/frame/examples/src/lib.rs +++ b/substrate/frame/examples/src/lib.rs @@ -17,24 +17,27 @@ //! # FRAME Pallet Examples //! -//! This crate contains examples of FRAME pallets. It is not intended to be used in production. +//! This crate contains a collection of simple examples of FRAME pallets, demonstrating useful +//! features in action. It is not intended to be used in production. //! //! ## Pallets //! -//! - [**`pallet_example_basic`**](./basic): A simple example of a FRAME pallet demonstrating -//! concepts, APIs and structures common to most FRAME runtimes. +//! - [`pallet_example_basic`]: This pallet demonstrates concepts, APIs and structures common to +//! most FRAME runtimes. //! -//! - [**`pallet_example_offchain_worker`**](./offchain-worker): A simple example of a FRAME pallet -//! demonstrating concepts, APIs and structures common to most offchain workers. +//! - [`pallet_example_offchain_worker`]: This pallet demonstrates concepts, APIs and structures +//! common to most offchain workers. //! -//! - [**`pallet-default-config-example`**](./default-config): A simple example of a FRAME pallet -//! demonstrating the simpler way to implement `Config` trait of pallets. +//! - [`pallet_default_config_example`]: This pallet demonstrates different ways to implement the +//! `Config` trait of pallets. //! -//! - [**`pallet-dev-mode`**](./dev-mode): A simple example of a FRAME pallet demonstrating the ease -//! of requirements for a pallet in dev mode. +//! 
- [`pallet_dev_mode`]: This pallet demonstrates the ease of requirements for a pallet in "dev +//! mode". //! -//! - [**`pallet-example-kitchensink`**](./kitchensink): A simple example of a FRAME pallet -//! demonstrating a catalog of the the FRAME macros and their various syntax options. +//! - [`pallet_example_kitchensink`]: This pallet demonstrates a catalog of all FRAME macros in use +//! and their various syntax options. //! -//! - [**`pallet-example-split`**](./split): A simple example of a FRAME pallet demonstrating the -//! ability to split sections across multiple files. +//! - [`pallet_example_split`]: A simple example of a FRAME pallet demonstrating the ability to +//! split sections across multiple files. +//! +//! **Tip**: Use `cargo doc --package --open` to view each pallet's documentation. From f6072e8be3adede66f5e56104f77920fe5c3b5db Mon Sep 17 00:00:00 2001 From: Sacha Lansky Date: Mon, 18 Sep 2023 11:09:17 +0200 Subject: [PATCH 04/16] [improve docs]: Timestamp pallet (#1435) This PR improves the docs for the Timestamp pallet by following our [Documentation Guidelines](https://github.com/paritytech/polkadot-sdk/blob/master/docs/DOCUMENTATION_GUIDELINE.md) more closely. --------- Co-authored-by: Juan Co-authored-by: Francisco Aguirre --- Cargo.lock | 1 + docs/DOCUMENTATION_GUIDELINE.md | 33 +++-- substrate/frame/timestamp/Cargo.toml | 2 + substrate/frame/timestamp/src/lib.rs | 162 ++++++++++++++-------- substrate/frame/timestamp/src/tests.rs | 2 + substrate/primitives/timestamp/src/lib.rs | 2 +- 6 files changed, 126 insertions(+), 76 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 06d4b34031f8..f09db7643e08 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10502,6 +10502,7 @@ dependencies = [ name = "pallet-timestamp" version = "4.0.0-dev" dependencies = [ + "docify", "frame-benchmarking", "frame-support", "frame-system", diff --git a/docs/DOCUMENTATION_GUIDELINE.md b/docs/DOCUMENTATION_GUIDELINE.md index f6c8cac7cd2a..dbb4298d50fe 100644 --- a/docs/DOCUMENTATION_GUIDELINE.md +++ b/docs/DOCUMENTATION_GUIDELINE.md @@ -123,9 +123,9 @@ explicitly depend on them, you have likely not designed it properly. #### Proc-Macros -Note that there are special considerations when documenting proc macros. Doc links will appear to function _within_ your +Note that there are special considerations when documenting proc macros. Doc links will appear to function *within* your proc macro crate, but often will no longer function when these proc macros are re-exported elsewhere in your project. -The exception is doc links to _other proc macros_ which will function just fine if they are also being re-exported. It +The exception is doc links to *other proc macros* which will function just fine if they are also being re-exported. It is also often necessary to disambiguate between a proc macro and a function of the same name, which can be done using the `macro@my_macro_name` syntax in your link. Read more about how to correctly use links in your rust-docs [here](https://doc.rust-lang.org/rustdoc/write-documentation/linking-to-items-by-name.html#valid-links) and @@ -189,6 +189,7 @@ fn multiply_by_2(x: u32) -> u32 { .. } // More efficiency can be achieved if we improve this via such and such. fn multiply_by_2(x: u32) -> u32 { .. } ``` + They are both roughly conveying the same set of facts, but one is easier to follow because it was formatted cleanly. Especially for traits and types that you can foresee will be seen and used a lot, try and write a well formatted version. 
@@ -203,7 +204,6 @@ properly do this. --- - ## Pallet Crates The guidelines so far have been general in nature, and are applicable to crates that are pallets and crates that're not @@ -223,6 +223,14 @@ For the top-level pallet docs, consider the following template: //! //! . //! +//! ## Pallet API +//! +//! +//! +//! See the [`pallet`] module for more information about the interfaces this pallet exposes, including its +//! configuration trait, dispatchables, storage items, events and errors. +//! //! ## Overview //! //! @@ -233,20 +241,12 @@ For the top-level pallet docs, consider the following template: //! //! ### Example //! -//! . +//! //! -//! ## Pallet API -//! -//! -//! -//! See the [`pallet`] module for more information about the interfaces this pallet exposes, including its configuration -//! trait, dispatchables, storage items, events and errors. -//! -//! +//! //! //! This section can most often be left as-is. //! @@ -272,7 +272,6 @@ For the top-level pallet docs, consider the following template: //! up> ``` - This template's details (heading 3s and beyond) are left flexible, and at the discretion of the developer to make the best final choice about. For example, you might want to include `### Terminology` or not. Moreover, you might find it more useful to include it in `## Overview`. diff --git a/substrate/frame/timestamp/Cargo.toml b/substrate/frame/timestamp/Cargo.toml index a39c79892d19..6759d90aaf41 100644 --- a/substrate/frame/timestamp/Cargo.toml +++ b/substrate/frame/timestamp/Cargo.toml @@ -27,6 +27,8 @@ sp-std = { path = "../../primitives/std", default-features = false} sp-storage = { path = "../../primitives/storage", default-features = false} sp-timestamp = { path = "../../primitives/timestamp", default-features = false} +docify = "0.2.1" + [dev-dependencies] sp-core = { path = "../../primitives/core" } sp-io = { path = "../../primitives/io" } diff --git a/substrate/frame/timestamp/src/lib.rs b/substrate/frame/timestamp/src/lib.rs index 4eb95941d782..ad055bab004f 100644 --- a/substrate/frame/timestamp/src/lib.rs +++ b/substrate/frame/timestamp/src/lib.rs @@ -15,53 +15,40 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Timestamp Pallet -//! -//! The Timestamp pallet provides functionality to get and set the on-chain time. -//! -//! - [`Config`] -//! - [`Call`] -//! - [`Pallet`] -//! -//! ## Overview -//! -//! The Timestamp pallet allows the validators to set and validate a timestamp with each block. -//! -//! It uses inherents for timestamp data, which is provided by the block author and -//! validated/verified by other validators. The timestamp can be set only once per block and must be -//! set each block. There could be a constraint on how much time must pass before setting the new -//! timestamp. +//! > Made with *Substrate*, for *Polkadot*. //! -//! **NOTE:** The Timestamp pallet is the recommended way to query the on-chain time instead of -//! using an approach based on block numbers. The block number based time measurement can cause -//! issues because of cumulative calculation errors and hence should be avoided. +//! [![github]](https://github.com/paritytech/polkadot-sdk/substrate/frame/timestamp) +//! [![polkadot]](https://polkadot.network) //! -//! ## Interface +//! [polkadot]: https://img.shields.io/badge/polkadot-E6007A?style=for-the-badge&logo=polkadot&logoColor=white +//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github //! -//! 
### Dispatchable Functions -//! -//! * `set` - Sets the current time. -//! -//! ### Public functions +//! # Timestamp Pallet //! -//! * `get` - Gets the current time for the current block. If this function is called prior to -//! setting the timestamp, it will return the timestamp of the previous block. +//! A pallet that provides a way for consensus systems to set and check the onchain time. //! -//! ### Config Getters +//! ## Pallet API //! -//! * `MinimumPeriod` - Gets the minimum (and advised) period between blocks for the chain. +//! See the [`pallet`] module for more information about the interfaces this pallet exposes, +//! including its configuration trait, dispatchables, storage items, events and errors. +//! +//! ## Overview //! -//! ## Usage +//! The Timestamp pallet is designed to create a consensus-based time source. This helps ensure that +//! nodes maintain a synchronized view of time that all network participants can agree on. //! -//! The following example shows how to use the Timestamp pallet in your custom pallet to query the -//! current timestamp. +//! It defines an _acceptable range_ using a configurable constant to specify how much time must +//! pass before setting the new timestamp. Validator nodes in the network must verify that the +//! timestamp falls within this acceptable range and reject blocks that do not. //! -//! ### Prerequisites +//! > **Note:** The timestamp set by this pallet is the recommended way to query the onchain time +//! > instead of using block numbers alone. Measuring time with block numbers can cause cumulative +//! > calculation errors if depended upon in time critical operations and hence should generally be +//! > avoided. //! -//! Import the Timestamp pallet into your custom pallet and derive the pallet configuration -//! trait from the timestamp trait. +//! ## Example //! -//! ### Get current timestamp +//! To get the current time for the current block in another pallet: //! //! ``` //! use pallet_timestamp::{self as timestamp}; @@ -83,7 +70,7 @@ //! #[pallet::weight(0)] //! pub fn get_time(origin: OriginFor) -> DispatchResult { //! let _sender = ensure_signed(origin)?; -//! let _now = >::get(); +//! let _now = timestamp::Pallet::::get(); //! Ok(()) //! } //! } @@ -91,15 +78,52 @@ //! # fn main() {} //! ``` //! -//! ### Example from the FRAME +//! If [`Pallet::get`] is called prior to setting the timestamp, it will return the timestamp of +//! the previous block. //! -//! The [Session pallet](https://github.com/paritytech/substrate/blob/master/frame/session/src/lib.rs) uses -//! the Timestamp pallet for session management. +//! ## Low Level / Implementation Details //! -//! ## Related Pallets +//! A timestamp is added to the chain using an _inherent extrinsic_ that only a block author can +//! submit. Inherents are a special type of extrinsic in Substrate chains that will always be +//! included in a block. //! -//! * [Session](../pallet_session/index.html) - +//! To provide inherent data to the runtime, this pallet implements +//! [`ProvideInherent`](frame_support::inherent::ProvideInherent). It will only create an inherent +//! if the [`Call::set`] dispatchable is called, using the +//! [`inherent`](frame_support::pallet_macros::inherent) macro which enables validator nodes to call +//! into the runtime to check that the timestamp provided is valid. +//! The implementation of [`ProvideInherent`](frame_support::inherent::ProvideInherent) specifies a +//! 
constant called `MAX_TIMESTAMP_DRIFT_MILLIS` which is used to determine the acceptable range for +//! a valid timestamp. If a block author sets a timestamp to anything that is more than this +//! constant, a validator node will reject the block. +//! +//! The pallet also ensures that a timestamp is set at the start of each block by running an +//! assertion in the `on_finalize` runtime hook. See [`frame_support::traits::Hooks`] for more +//! information about how hooks work. +//! +//! Because inherents are applied to a block in the order they appear in the runtime +//! construction, the index of this pallet in +//! [`construct_runtime`](frame_support::construct_runtime) must always be less than any other +//! pallet that depends on it. +//! +//! The [`Config::OnTimestampSet`] configuration trait can be set to another pallet we want to +//! notify that the timestamp has been updated, as long as it implements [`OnTimestampSet`]. +//! Examples are the Babe and Aura pallets. +//! This pallet also implements [`Time`] and [`UnixTime`] so it can be used to configure other +//! pallets that require these types (e.g. in Staking pallet). +//! +//! ## Panics +//! +//! There are 3 cases where this pallet could cause the runtime to panic. +//! +//! 1. If no timestamp is set at the end of a block. +//! +//! 2. If a timestamp is set more than once per block: +#![doc = docify::embed!("src/tests.rs", double_timestamp_should_fail)] +//! +//! 3. If a timestamp is set before the [`Config::MinimumPeriod`] is elapsed: +#![doc = docify::embed!("src/tests.rs", block_period_minimum_enforced)] +#![deny(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] mod benchmarking; @@ -123,10 +147,9 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - /// The pallet configuration trait #[pallet::config] pub trait Config: frame_system::Config { - /// Type used for expressing timestamp. + /// Type used for expressing a timestamp. type Moment: Parameter + Default + AtLeast32Bit @@ -135,14 +158,17 @@ pub mod pallet { + MaxEncodedLen + scale_info::StaticTypeInfo; - /// Something which can be notified when the timestamp is set. Set this to `()` if not - /// needed. + /// Something which can be notified (e.g. another pallet) when the timestamp is set. + /// + /// This can be set to `()` if it is not needed. type OnTimestampSet: OnTimestampSet; - /// The minimum period between blocks. Beware that this is different to the *expected* - /// period that the block production apparatus provides. Your chosen consensus system will - /// generally work with this to determine a sensible block time. e.g. For Aura, it will be - /// double this period on default settings. + /// The minimum period between blocks. + /// + /// Be aware that this is different to the *expected* period that the block production + /// apparatus provides. Your chosen consensus system will generally work with this to + /// determine a sensible block time. For example, in the Aura pallet it will be double this + /// period on default settings. #[pallet::constant] type MinimumPeriod: Get; @@ -153,23 +179,31 @@ pub mod pallet { #[pallet::pallet] pub struct Pallet(_); - /// Current time for the current block. + /// The current time for the current block. #[pallet::storage] #[pallet::getter(fn now)] pub type Now = StorageValue<_, T::Moment, ValueQuery>; - /// Did the timestamp get updated in this block? + /// Whether the timestamp has been updated in this block. 
+ /// + /// This value is updated to `true` upon successful submission of a timestamp by a node. + /// It is then checked at the end of each block execution in the `on_finalize` hook. #[pallet::storage] pub(super) type DidUpdate = StorageValue<_, bool, ValueQuery>; #[pallet::hooks] impl Hooks> for Pallet { - /// dummy `on_initialize` to return the weight used in `on_finalize`. + /// A dummy `on_initialize` to return the amount of weight that `on_finalize` requires to + /// execute. fn on_initialize(_n: BlockNumberFor) -> Weight { // weight of `on_finalize` T::WeightInfo::on_finalize() } + /// At the end of block execution, the `on_finalize` hook checks that the timestamp was + /// updated. Upon success, it removes the boolean value from storage. If the value resolves + /// to `false`, the pallet will panic. + /// /// ## Complexity /// - `O(1)` fn on_finalize(_n: BlockNumberFor) { @@ -185,13 +219,17 @@ pub mod pallet { /// phase, if this call hasn't been invoked by that time. /// /// The timestamp should be greater than the previous one by the amount specified by - /// `MinimumPeriod`. + /// [`Config::MinimumPeriod`]. + /// + /// The dispatch origin for this call must be _None_. /// - /// The dispatch origin for this call must be `Inherent`. + /// This dispatch class is _Mandatory_ to ensure it gets executed in the block. Be aware + /// that changing the complexity of this call could result exhausting the resources in a + /// block to execute any other calls. /// /// ## Complexity /// - `O(1)` (Note that implementations of `OnTimestampSet` must also be `O(1)`) - /// - 1 storage read and 1 storage mutation (codec `O(1)`). (because of `DidUpdate::take` in + /// - 1 storage read and 1 storage mutation (codec `O(1)` because of `DidUpdate::take` in /// `on_finalize`) /// - 1 event handler `on_timestamp_set`. Must be `O(1)`. #[pallet::call_index(0)] @@ -216,6 +254,14 @@ pub mod pallet { } } + /// To check the inherent is valid, we simply take the max value between the current timestamp + /// and the current timestamp plus the [`Config::MinimumPeriod`]. + /// We also check that the timestamp has not already been set in this block. + /// + /// ## Errors: + /// - [`InherentError::TooFarInFuture`]: If the timestamp is larger than the current timestamp + + /// minimum drift period. + /// - [`InherentError::TooEarly`]: If the timestamp is less than the current + minimum period. #[pallet::inherent] impl ProvideInherent for Pallet { type Call = Call; @@ -285,9 +331,9 @@ impl Pallet { } impl Time for Pallet { + /// A type that represents a unit of time. type Moment = T::Moment; - /// Before the first set of now with inherent the value returned is zero. 
fn now() -> Self::Moment { Self::now() } diff --git a/substrate/frame/timestamp/src/tests.rs b/substrate/frame/timestamp/src/tests.rs index 317631eeb704..cc49d8a3296e 100644 --- a/substrate/frame/timestamp/src/tests.rs +++ b/substrate/frame/timestamp/src/tests.rs @@ -30,6 +30,7 @@ fn timestamp_works() { }); } +#[docify::export] #[test] #[should_panic(expected = "Timestamp must be updated only once in the block")] fn double_timestamp_should_fail() { @@ -39,6 +40,7 @@ fn double_timestamp_should_fail() { }); } +#[docify::export] #[test] #[should_panic( expected = "Timestamp must increment by at least between sequential blocks" diff --git a/substrate/primitives/timestamp/src/lib.rs b/substrate/primitives/timestamp/src/lib.rs index eeec73efbc8b..d1bd2a3446e6 100644 --- a/substrate/primitives/timestamp/src/lib.rs +++ b/substrate/primitives/timestamp/src/lib.rs @@ -140,7 +140,7 @@ pub enum InherentError { error("The time since the last timestamp is lower than the minimum period.") )] TooEarly, - /// The block timestamp is too far in the future + /// The block timestamp is too far in the future. #[cfg_attr(feature = "std", error("The timestamp of the block is too far in the future."))] TooFarInFuture, } From d569e728ca710c64392e524c9ccd4ec10c370269 Mon Sep 17 00:00:00 2001 From: Branislav Kontur Date: Mon, 18 Sep 2023 11:11:08 +0200 Subject: [PATCH 05/16] "Common good" vs "System" parachain clean up (#1406) ## Summary The term "common good parachain" has been abandoned in favor of "system parachain" - e.g. [Joe's speech at Decoded2023](https://youtu.be/CSO-ERHK2gY?t=456). This pull request tries to fix and align code with this vision. ## Impact The important change is implementation of `trait IsSystem` for `Id` [here](https://github.com/paritytech/polkadot-sdk/pull/1406/files#diff-0b7b4f5b962a18ce980354592b55ab2a27b5a2e9f6f8089ec803ca73853e8583R225-R229) where we changed condition from `< 1000` to `<= 1999`, which means that all parachain IDs bellow 1999 (included) are considered as "system parachain" IDs. This change has a direct impact on the following components: #### [ChildSystemParachainAsSuperuser](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/xcm/xcm-builder/src/origin_conversion.rs#L72-L88) This origin converter is used for allowing to process XCM `Transact` from "system parachain" on the relay chain - e.g. see [configuration for Kusama](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/runtime/kusama/src/xcm_config.rs#L92-L101). Only configured for Kusama, Westend, Rococo runtimes. **No need for this feature anymore.** See [comment](https://github.com/paritytech/polkadot-sdk/pull/1406#issuecomment-1708218715). #### [IsChildSystemParachain](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/xcm/xcm-builder/src/barriers.rs#L310-L317) `IsChildSystemParachain` is used with `AllowExplicitUnpaidExecutionFrom` barrier for checking XCM programs (they have to start with `UnpaidExecution` instruction). Only configured for Kusama, Westend, Rococo runtimes. **Overall the impact is low or mostly ok because it only allows unpaid execution for "system parachains" (e.g. AssetHub, BridgeHub...) on the relay chain.** #### [SiblingSystemParachainAsSuperuser](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/xcm/xcm-builder/src/origin_conversion.rs#L94-L114) Not used anywhere in `polkadot-sdk` repo. 
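To make the new boundary concrete, here is a small standalone sketch (illustrative only, not part of the diff below) of how the changed `IsSystem` rule classifies a few para IDs; the actual implementation lives in `polkadot/parachain/src/primitives.rs` as shown further down:

```rust
// Sketch of the new rule from this PR: every ID up to and including 1999 is
// treated as a system parachain ID; 2000 is the first publicly registrable ID.
const SYSTEM_INDEX_END: u32 = 1999;

fn is_system(para_id: u32) -> bool {
    para_id <= SYSTEM_INDEX_END
}

fn main() {
    assert!(is_system(999));    // already a system ID under the old `< 1000` rule
    assert!(is_system(1000));   // newly considered a system ID by this change
    assert!(is_system(1999));   // upper bound of the system range is included
    assert!(!is_system(2000));  // `LOWEST_PUBLIC_ID`: first publicly registrable ID
}
```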
## Unresolved Questions - [ ] constants `LOWEST_USER_ID` and `LOWEST_PUBLIC_ID` seem to express the same thing now, do we want to keep them both or deprecated one of them? If so, which one? - [x] determine impact for `ChildSystemParachainAsSuperuser` ## TODO - [ ] when merged here, open PR to the `polkadot-fellows` ## Related Material https://youtu.be/CSO-ERHK2gY?t=456 https://forum.polkadot.network/t/polkadot-protocol-and-common-good-parachains/866 https://wiki.polkadot.network/docs/learn-system-chains --- .../collectives/collectives-polkadot/src/lib.rs | 2 +- .../runtimes/testing/penpal/src/xcm_config.rs | 2 +- polkadot/parachain/src/primitives.rs | 10 ++++------ polkadot/primitives/src/v5/mod.rs | 2 +- polkadot/runtime/kusama/src/lib.rs | 2 +- polkadot/runtime/kusama/src/xcm_config.rs | 11 ++++------- polkadot/runtime/polkadot/src/lib.rs | 2 +- polkadot/runtime/rococo/src/xcm_config.rs | 11 ++++------- polkadot/runtime/westend/src/xcm_config.rs | 9 ++++----- 9 files changed, 21 insertions(+), 30 deletions(-) diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs index d9125c2645ef..70e62a4dcdf9 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs @@ -20,7 +20,7 @@ //! //! ### Governance //! -//! As a common good parachain, Collectives defers its governance (namely, its `Root` origin), to +//! As a system parachain, Collectives defers its governance (namely, its `Root` origin), to //! its Relay Chain parent, Polkadot. //! //! ### Collator Selection diff --git a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs index 97d2e63370ea..f2ffc451b10e 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs @@ -163,7 +163,7 @@ pub type Barrier = TrailingSetTopicAsId< // If the message is one that immediately attemps to pay for execution, then // allow it. AllowTopLevelPaidExecutionFrom, - // Common Good Assets parachain, parent and its exec plurality get free + // System Assets parachain, parent and its exec plurality get free // execution AllowExplicitUnpaidExecutionFrom<( CommonGoodAssetsParachain, diff --git a/polkadot/parachain/src/primitives.rs b/polkadot/parachain/src/primitives.rs index 5cea9d3bbf4e..5f77810f5c23 100644 --- a/polkadot/parachain/src/primitives.rs +++ b/polkadot/parachain/src/primitives.rs @@ -199,13 +199,11 @@ impl From for Id { } } -const USER_INDEX_START: u32 = 1000; +// System parachain ID is considered `< 2000`. +const SYSTEM_INDEX_END: u32 = 1999; const PUBLIC_INDEX_START: u32 = 2000; -/// The ID of the first user (non-system) parachain. -pub const LOWEST_USER_ID: Id = Id(USER_INDEX_START); - -/// The ID of the first publicly registerable parachain. +/// The ID of the first publicly registrable parachain. 
pub const LOWEST_PUBLIC_ID: Id = Id(PUBLIC_INDEX_START); impl Id { @@ -223,7 +221,7 @@ pub trait IsSystem { impl IsSystem for Id { fn is_system(&self) -> bool { - self.0 < USER_INDEX_START + self.0 <= SYSTEM_INDEX_END } } diff --git a/polkadot/primitives/src/v5/mod.rs b/polkadot/primitives/src/v5/mod.rs index 30782f95611f..81743225403d 100644 --- a/polkadot/primitives/src/v5/mod.rs +++ b/polkadot/primitives/src/v5/mod.rs @@ -44,7 +44,7 @@ pub use polkadot_core_primitives::v2::{ // Export some polkadot-parachain primitives pub use polkadot_parachain_primitives::primitives::{ HeadData, HorizontalMessages, HrmpChannelId, Id, UpwardMessage, UpwardMessages, ValidationCode, - ValidationCodeHash, LOWEST_PUBLIC_ID, LOWEST_USER_ID, + ValidationCodeHash, LOWEST_PUBLIC_ID, }; use serde::{Deserialize, Serialize}; diff --git a/polkadot/runtime/kusama/src/lib.rs b/polkadot/runtime/kusama/src/lib.rs index fc9fd61790ea..0681db23bc25 100644 --- a/polkadot/runtime/kusama/src/lib.rs +++ b/polkadot/runtime/kusama/src/lib.rs @@ -643,7 +643,7 @@ impl pallet_staking::EraPayout for EraPayout { // all para-ids that are currently active. let auctioned_slots = Paras::parachains() .into_iter() - // all active para-ids that do not belong to a system or common good chain is the number + // all active para-ids that do not belong to a system chain is the number // of parachains that we should take into account for inflation. .filter(|i| *i >= LOWEST_PUBLIC_ID) .count() as u64; diff --git a/polkadot/runtime/kusama/src/xcm_config.rs b/polkadot/runtime/kusama/src/xcm_config.rs index 6a9cea22bbc2..59ea937e11c1 100644 --- a/polkadot/runtime/kusama/src/xcm_config.rs +++ b/polkadot/runtime/kusama/src/xcm_config.rs @@ -37,11 +37,10 @@ use xcm::latest::prelude::*; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, ChildParachainAsNative, - ChildParachainConvertsVia, ChildSystemParachainAsSuperuser, - CurrencyAdapter as XcmCurrencyAdapter, IsChildSystemParachain, IsConcrete, MintLocation, - OriginToPluralityVoice, SignedAccountId32AsNative, SignedToAccountId32, - SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, - WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + ChildParachainConvertsVia, CurrencyAdapter as XcmCurrencyAdapter, IsChildSystemParachain, + IsConcrete, MintLocation, OriginToPluralityVoice, SignedAccountId32AsNative, + SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, + UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, }; parameter_types! { @@ -95,8 +94,6 @@ type LocalOriginConverter = ( ChildParachainAsNative, // The AccountId32 location type can be expressed natively as a `Signed` origin. SignedAccountId32AsNative, - // A system child parachain, expressed as a Superuser, converts to the `Root` origin. - ChildSystemParachainAsSuperuser, ); parameter_types! { diff --git a/polkadot/runtime/polkadot/src/lib.rs b/polkadot/runtime/polkadot/src/lib.rs index 6d06ee469333..441a5132eda8 100644 --- a/polkadot/runtime/polkadot/src/lib.rs +++ b/polkadot/runtime/polkadot/src/lib.rs @@ -552,7 +552,7 @@ impl pallet_staking::EraPayout for EraPayout { // all para-ids that are not active. 
let auctioned_slots = Paras::parachains() .into_iter() - // all active para-ids that do not belong to a system or common good chain is the number + // all active para-ids that do not belong to a system chain is the number // of parachains that we should take into account for inflation. .filter(|i| *i >= LOWEST_PUBLIC_ID) .count() as u64; diff --git a/polkadot/runtime/rococo/src/xcm_config.rs b/polkadot/runtime/rococo/src/xcm_config.rs index d561df14a027..288ee8400dcf 100644 --- a/polkadot/runtime/rococo/src/xcm_config.rs +++ b/polkadot/runtime/rococo/src/xcm_config.rs @@ -36,11 +36,10 @@ use xcm::latest::prelude::*; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, ChildParachainAsNative, - ChildParachainConvertsVia, ChildSystemParachainAsSuperuser, - CurrencyAdapter as XcmCurrencyAdapter, FixedWeightBounds, IsChildSystemParachain, IsConcrete, - MintLocation, SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, - TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, - WithUniqueTopic, + ChildParachainConvertsVia, CurrencyAdapter as XcmCurrencyAdapter, FixedWeightBounds, + IsChildSystemParachain, IsConcrete, MintLocation, SignedAccountId32AsNative, + SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, + UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, }; use xcm_executor::XcmExecutor; @@ -80,8 +79,6 @@ type LocalOriginConverter = ( ChildParachainAsNative, // The AccountId32 location type can be expressed natively as a `Signed` origin. SignedAccountId32AsNative, - // A system child parachain, expressed as a Superuser, converts to the `Root` origin. - ChildSystemParachainAsSuperuser, ); parameter_types! { diff --git a/polkadot/runtime/westend/src/xcm_config.rs b/polkadot/runtime/westend/src/xcm_config.rs index 264830c693ef..afa0733b4c9a 100644 --- a/polkadot/runtime/westend/src/xcm_config.rs +++ b/polkadot/runtime/westend/src/xcm_config.rs @@ -35,10 +35,10 @@ use xcm::latest::prelude::*; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, ChildParachainAsNative, - ChildParachainConvertsVia, ChildSystemParachainAsSuperuser, - CurrencyAdapter as XcmCurrencyAdapter, IsChildSystemParachain, IsConcrete, MintLocation, - SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, - TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + ChildParachainConvertsVia, CurrencyAdapter as XcmCurrencyAdapter, IsChildSystemParachain, + IsConcrete, MintLocation, SignedAccountId32AsNative, SignedToAccountId32, + SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, + WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, }; use xcm_executor::XcmExecutor; @@ -74,7 +74,6 @@ type LocalOriginConverter = ( SovereignSignedViaLocation, ChildParachainAsNative, SignedAccountId32AsNative, - ChildSystemParachainAsSuperuser, ); /// The XCM router. When we want to send an XCM message, we use this type. 
It amalgamates all of our From 614aa31bf7a4b0e04595835a80562ce5eac96c4d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gon=C3=A7alo=20Pestana?= Date: Mon, 18 Sep 2023 11:18:19 +0200 Subject: [PATCH 06/16] Implements a variable deposit base calculation for EPM signed submissions (#1547) **Note**: This is a lift-and-shift PR from the old substrate and polkadot repos, both PRs have been reviewed and audited (https://github.com/paritytech/substrate/pull/13983, https://github.com/paritytech/polkadot/pull/7140) --- This PR implements a generic `BaseDeposit` calculation for signed submissions, based on the size of the submission queue. It adds a new associated type to EPM's config, `type SignedDepositBase`, that implements `Convert>`, which is used to calculate the base deposit for signed submissions based on the size of the signed submissions queue. `struct GeometricDepositBase` implements the convert trait so that the deposit value increases as a geometric progression. The deposit base is calculated by `deposit_base = fixed_deposit_base * (1 + increase_factor)^n`, where `n` is the term of the progression (i.e. the number of signed submissions in the queue). `Fixed` and `Inc` generic params are getters for `Balance` and `IncreaseFactor` to compute the geometric progression. If `IncreaseFactor = 0`, then the signed deposit is constant and equal to `Fixed` regardless of the size of the queue. ### Runtime configs In Kusama, the progression with 10% increase without changing the current signed fixed deposit is: (term == size of the queue) Term 1: `1,333,333,332,000` Term 2: `1,333,333,332,000 * 1.10 = 1,466,666,665,200` Term 3: `1,333,333,332,000 * 1.10^2 = 1,613,333,331,200` Term 4: `1,333,333,332,000 * 1.10^3 = 1,774,666,664,320` Term 5: `1,333,333,332,000 * 1.10^4 = 1,952,133,330,752` Term 6: `1,333,333,332,000 * 1.10^5 = 2,147,346,663,827.20` Term 7: `1,333,333,332,000 * 1.10^6 = 2,362,081,330,210.92` Term 8: `1,333,333,332,000 * 1.10^7 = 2,598,289,463,231.01` Term 9: `1,333,333,332,000 * 1.10^8 = 2,858,118,409,554.11` Term 10: `1,333,333,332,000 * 1.10^9 = 3,143,930,250,509.52` Westend: Term 1: `2,000,000,000,000` Term 2: `2,000,000,000,000 * 1.10 = 2,200,000,000,000` Term 3: `2,000,000,000,000 * 1.10^2 = 2,420,000,000,000` Term 4: `2,000,000,000,000 * 1.10^3 = 2,662,000,000,000` Term 5: `2,000,000,000,000 * 1.10^4 = 2,928,200,000,000` Term 6: `2,000,000,000,000 * 1.10^5 = 3,221,020,000,000` Term 7: `2,000,000,000,000 * 1.10^6 = 3,543,122,000,000` Term 8: `2,000,000,000,000 * 1.10^7 = 3,897,434,200,000` Term 9: `2,000,000,000,000 * 1.10^8 = 4,287,177,620,000` Term 10: `2,000,000,000,000 * 1.10^9 = 4,715,895,382,000` and in Polkadot, the deposit increase is disabled in the current state of the PR, as the increase factor is 0% -- so nothing changes from the current behaviour. 
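As a quick illustration of the progression described above, the following standalone sketch (illustrative only; it uses plain floating point rather than the pallet's `FixedU128` saturating arithmetic shown in the diff below, so later terms may differ slightly from the table) computes `fixed_deposit_base * (1 + increase_factor)^n` for a Kusama-style configuration:

```rust
// Sketch of `deposit_base = fixed_deposit_base * (1 + increase_factor)^n`,
// where `n` is the number of signed submissions already in the queue.
fn geometric_deposit_base(fixed_base: u128, increase_percent: u32, queue_len: u32) -> u128 {
    let factor = 1.0 + increase_percent as f64 / 100.0;
    (fixed_base as f64 * factor.powi(queue_len as i32)).round() as u128
}

fn main() {
    // Kusama-style parameters: the fixed base deposit with a 10% increase factor.
    let fixed = 1_333_333_332_000u128;
    for n in 0..10u32 {
        // Term 1 corresponds to an empty queue (n = 0).
        println!("term {}: {}", n + 1, geometric_deposit_base(fixed, 10, n));
    }
}
```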
Closes https://github.com/paritytech-secops/srlabs_findings/issues/189 --- polkadot/runtime/kusama/src/lib.rs | 10 ++- polkadot/runtime/polkadot/src/lib.rs | 9 +- polkadot/runtime/westend/src/lib.rs | 10 ++- substrate/bin/node/runtime/src/lib.rs | 8 +- .../election-provider-multi-phase/src/lib.rs | 13 +-- .../election-provider-multi-phase/src/mock.rs | 34 +++++-- .../src/signed.rs | 89 +++++++++++++++++-- .../test-staking-e2e/src/mock.rs | 11 ++- 8 files changed, 150 insertions(+), 34 deletions(-) diff --git a/polkadot/runtime/kusama/src/lib.rs b/polkadot/runtime/kusama/src/lib.rs index 0681db23bc25..5cf2b3b83b1f 100644 --- a/polkadot/runtime/kusama/src/lib.rs +++ b/polkadot/runtime/kusama/src/lib.rs @@ -93,7 +93,7 @@ use xcm::latest::Junction; pub use frame_system::Call as SystemCall; pub use pallet_balances::Call as BalancesCall; -pub use pallet_election_provider_multi_phase::Call as EPMCall; +pub use pallet_election_provider_multi_phase::{Call as EPMCall, GeometricDepositBase}; #[cfg(feature = "std")] pub use pallet_staking::StakerStatus; use pallet_staking::UseValidatorsMap; @@ -511,7 +511,8 @@ parameter_types! { // signed config pub const SignedMaxSubmissions: u32 = 16; pub const SignedMaxRefunds: u32 = 16 / 4; - pub const SignedDepositBase: Balance = deposit(2, 0); + pub const SignedFixedDeposit: Balance = deposit(2, 0); + pub const SignedDepositIncreaseFactor: Percent = Percent::from_percent(10); pub const SignedDepositByte: Balance = deposit(0, 10) / 1024; // Each good submission will get 1/10 KSM as reward pub SignedRewardBase: Balance = UNITS / 10; @@ -584,7 +585,8 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type SignedMaxSubmissions = SignedMaxSubmissions; type SignedMaxRefunds = SignedMaxRefunds; type SignedRewardBase = SignedRewardBase; - type SignedDepositBase = SignedDepositBase; + type SignedDepositBase = + GeometricDepositBase; type SignedDepositByte = SignedDepositByte; type SignedDepositWeight = (); type SignedMaxWeight = @@ -2484,7 +2486,7 @@ mod fees_tests { fn signed_deposit_is_sensible() { // ensure this number does not change, or that it is checked after each change. // a 1 MB solution should need around 0.16 KSM deposit - let deposit = SignedDepositBase::get() + (SignedDepositByte::get() * 1024 * 1024); + let deposit = SignedFixedDeposit::get() + (SignedDepositByte::get() * 1024 * 1024); assert_eq_error_rate!(deposit, UNITS * 167 / 100, UNITS / 100); } } diff --git a/polkadot/runtime/polkadot/src/lib.rs b/polkadot/runtime/polkadot/src/lib.rs index 441a5132eda8..da27771af400 100644 --- a/polkadot/runtime/polkadot/src/lib.rs +++ b/polkadot/runtime/polkadot/src/lib.rs @@ -90,7 +90,7 @@ use xcm::latest::Junction; pub use frame_system::Call as SystemCall; pub use pallet_balances::Call as BalancesCall; -pub use pallet_election_provider_multi_phase::Call as EPMCall; +pub use pallet_election_provider_multi_phase::{Call as EPMCall, GeometricDepositBase}; #[cfg(feature = "std")] pub use pallet_staking::StakerStatus; use pallet_staking::UseValidatorsMap; @@ -382,6 +382,8 @@ parameter_types! { // signed config pub const SignedMaxSubmissions: u32 = 16; pub const SignedMaxRefunds: u32 = 16 / 4; + pub const SignedFixedDeposit: Balance = deposit(2, 0); + pub const SignedDepositIncreaseFactor: Percent = Percent::from_percent(10); // 40 DOTs fixed deposit.. pub const SignedDepositBase: Balance = deposit(2, 0); // 0.01 DOT per KB of solution data. 
@@ -456,7 +458,8 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type SignedMaxSubmissions = SignedMaxSubmissions; type SignedMaxRefunds = SignedMaxRefunds; type SignedRewardBase = SignedRewardBase; - type SignedDepositBase = SignedDepositBase; + type SignedDepositBase = + GeometricDepositBase; type SignedDepositByte = SignedDepositByte; type SignedDepositWeight = (); type SignedMaxWeight = @@ -2352,7 +2355,7 @@ mod test_fees { fn signed_deposit_is_sensible() { // ensure this number does not change, or that it is checked after each change. // a 1 MB solution should take (40 + 10) DOTs of deposit. - let deposit = SignedDepositBase::get() + (SignedDepositByte::get() * 1024 * 1024); + let deposit = SignedFixedDeposit::get() + (SignedDepositByte::get() * 1024 * 1024); assert_eq_error_rate!(deposit, 50 * DOLLARS, DOLLARS); } } diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 9a7e7c4548c8..5cb6cbbab20e 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -79,7 +79,7 @@ use sp_runtime::{ Keccak256, OpaqueKeys, SaturatedConversion, Verify, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill, + ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill, Percent, }; use sp_staking::SessionIndex; use sp_std::{collections::btree_map::BTreeMap, prelude::*}; @@ -90,7 +90,7 @@ use xcm::latest::Junction; pub use frame_system::Call as SystemCall; pub use pallet_balances::Call as BalancesCall; -pub use pallet_election_provider_multi_phase::Call as EPMCall; +pub use pallet_election_provider_multi_phase::{Call as EPMCall, GeometricDepositBase}; #[cfg(feature = "std")] pub use pallet_staking::StakerStatus; use pallet_staking::UseValidatorsMap; @@ -481,7 +481,8 @@ parameter_types! 
{ // signed config pub const SignedMaxSubmissions: u32 = 128; pub const SignedMaxRefunds: u32 = 128 / 4; - pub const SignedDepositBase: Balance = deposit(2, 0); + pub const SignedFixedDeposit: Balance = deposit(2, 0); + pub const SignedDepositIncreaseFactor: Percent = Percent::from_percent(10); pub const SignedDepositByte: Balance = deposit(0, 10) / 1024; // Each good submission will get 1 WND as reward pub SignedRewardBase: Balance = 1 * UNITS; @@ -553,7 +554,8 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type SignedMaxSubmissions = SignedMaxSubmissions; type SignedMaxRefunds = SignedMaxRefunds; type SignedRewardBase = SignedRewardBase; - type SignedDepositBase = SignedDepositBase; + type SignedDepositBase = + GeometricDepositBase; type SignedDepositByte = SignedDepositByte; type SignedDepositWeight = (); type SignedMaxWeight = diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index a97f2b09118c..df8fb06467d9 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -58,7 +58,7 @@ pub use node_primitives::{AccountId, Signature}; use node_primitives::{AccountIndex, Balance, BlockNumber, Hash, Moment, Nonce}; use pallet_asset_conversion::{NativeOrAssetId, NativeOrAssetIdConverter}; use pallet_broker::{CoreAssignment, CoreIndex, CoretimeInterface, PartsOf57600}; -use pallet_election_provider_multi_phase::SolutionAccuracyOf; +use pallet_election_provider_multi_phase::{GeometricDepositBase, SolutionAccuracyOf}; use pallet_im_online::sr25519::AuthorityId as ImOnlineId; use pallet_nfts::PalletFeatures; use pallet_nis::WithMaximumOf; @@ -694,7 +694,8 @@ parameter_types! { // signed config pub const SignedRewardBase: Balance = 1 * DOLLARS; - pub const SignedDepositBase: Balance = 1 * DOLLARS; + pub const SignedFixedDeposit: Balance = 1 * DOLLARS; + pub const SignedDepositIncreaseFactor: Percent = Percent::from_percent(10); pub const SignedDepositByte: Balance = 1 * CENTS; pub BetterUnsignedThreshold: Perbill = Perbill::from_rational(1u32, 10_000); @@ -822,7 +823,8 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type MinerConfig = Self; type SignedMaxSubmissions = ConstU32<10>; type SignedRewardBase = SignedRewardBase; - type SignedDepositBase = SignedDepositBase; + type SignedDepositBase = + GeometricDepositBase; type SignedDepositByte = SignedDepositByte; type SignedMaxRefunds = ConstU32<3>; type SignedDepositWeight = (); diff --git a/substrate/frame/election-provider-multi-phase/src/lib.rs b/substrate/frame/election-provider-multi-phase/src/lib.rs index 0d751e3f9cb0..8b6e0827c715 100644 --- a/substrate/frame/election-provider-multi-phase/src/lib.rs +++ b/substrate/frame/election-provider-multi-phase/src/lib.rs @@ -279,8 +279,8 @@ use unsigned::VoterOf; pub use weights::WeightInfo; pub use signed::{ - BalanceOf, NegativeImbalanceOf, PositiveImbalanceOf, SignedSubmission, SignedSubmissionOf, - SignedSubmissions, SubmissionIndicesOf, + BalanceOf, GeometricDepositBase, NegativeImbalanceOf, PositiveImbalanceOf, SignedSubmission, + SignedSubmissionOf, SignedSubmissions, SubmissionIndicesOf, }; pub use unsigned::{Miner, MinerConfig}; @@ -572,6 +572,7 @@ pub mod pallet { use frame_election_provider_support::{InstantElectionProvider, NposSolver}; use frame_support::{pallet_prelude::*, traits::EstimateCallFee}; use frame_system::pallet_prelude::*; + use sp_runtime::traits::Convert; #[pallet::config] pub trait Config: frame_system::Config + SendTransactionTypes> { @@ -649,10 +650,6 @@ 
pub mod pallet { #[pallet::constant] type SignedRewardBase: Get>; - /// Base deposit for a signed solution. - #[pallet::constant] - type SignedDepositBase: Get>; - /// Per-byte deposit for a signed solution. #[pallet::constant] type SignedDepositByte: Get>; @@ -668,6 +665,10 @@ pub mod pallet { #[pallet::constant] type MaxWinners: Get; + /// Something that calculates the signed deposit base based on the signed submissions queue + /// size. + type SignedDepositBase: Convert>; + /// The maximum number of electing voters and electable targets to put in the snapshot. /// At the moment, snapshots are only over a single block, but once multi-block elections /// are introduced they will take place over multiple blocks. diff --git a/substrate/frame/election-provider-multi-phase/src/mock.rs b/substrate/frame/election-provider-multi-phase/src/mock.rs index 05d151e51ecc..d4659eba5661 100644 --- a/substrate/frame/election-provider-multi-phase/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/src/mock.rs @@ -16,7 +16,7 @@ // limitations under the License. use super::*; -use crate::{self as multi_phase, unsigned::MinerConfig}; +use crate::{self as multi_phase, signed::GeometricDepositBase, unsigned::MinerConfig}; use frame_election_provider_support::{ bounds::{DataProviderBounds, ElectionBounds}, data_provider, onchain, ElectionDataProvider, NposSolution, SequentialPhragmen, @@ -44,8 +44,8 @@ use sp_npos_elections::{ use sp_runtime::{ bounded_vec, testing::Header, - traits::{BlakeTwo256, IdentityLookup}, - BuildStorage, PerU16, + traits::{BlakeTwo256, Convert, IdentityLookup}, + BuildStorage, PerU16, Percent, }; use std::sync::Arc; @@ -283,7 +283,11 @@ parameter_types! { pub static UnsignedPhase: BlockNumber = 5; pub static SignedMaxSubmissions: u32 = 5; pub static SignedMaxRefunds: u32 = 1; - pub static SignedDepositBase: Balance = 5; + // for tests only. if `EnableVariableDepositBase` is true, the deposit base will be calculated + // by `Multiphase::DepositBase`. Otherwise the deposit base is `SignedFixedDeposit`. + pub static EnableVariableDepositBase: bool = false; + pub static SignedFixedDeposit: Balance = 5; + pub static SignedDepositIncreaseFactor: Percent = Percent::from_percent(10); pub static SignedDepositByte: Balance = 0; pub static SignedDepositWeight: Balance = 0; pub static SignedRewardBase: Balance = 7; @@ -393,7 +397,7 @@ impl crate::Config for Runtime { type OffchainRepeat = OffchainRepeat; type MinerTxPriority = MinerTxPriority; type SignedRewardBase = SignedRewardBase; - type SignedDepositBase = SignedDepositBase; + type SignedDepositBase = Self; type SignedDepositByte = (); type SignedDepositWeight = (); type SignedMaxWeight = SignedMaxWeight; @@ -414,6 +418,18 @@ impl crate::Config for Runtime { type ElectionBounds = ElectionsBounds; } +impl Convert> for Runtime { + /// returns the geometric increase deposit fee if `EnableVariableDepositBase` is set, otherwise + /// the fee is `SignedFixedDeposit`. 
+ fn convert(queue_len: usize) -> Balance { + if !EnableVariableDepositBase::get() { + SignedFixedDeposit::get() + } else { + GeometricDepositBase::::convert(queue_len) + } + } +} + impl frame_system::offchain::SendTransactionTypes for Runtime where RuntimeCall: From, @@ -553,8 +569,14 @@ impl ExtBuilder { ::set(count); self } + pub fn signed_base_deposit(self, base: u64, variable: bool, increase: Percent) -> Self { + ::set(variable); + ::set(base); + ::set(increase); + self + } pub fn signed_deposit(self, base: u64, byte: u64, weight: u64) -> Self { - ::set(base); + ::set(base); ::set(byte); ::set(weight); self diff --git a/substrate/frame/election-provider-multi-phase/src/signed.rs b/substrate/frame/election-provider-multi-phase/src/signed.rs index 76068ba99d36..a5fe8ce55582 100644 --- a/substrate/frame/election-provider-multi-phase/src/signed.rs +++ b/substrate/frame/election-provider-multi-phase/src/signed.rs @@ -17,6 +17,8 @@ //! The signed phase implementation. +use core::marker::PhantomData; + use crate::{ unsigned::MinerConfig, Config, ElectionCompute, Pallet, QueuedSolution, RawSolution, ReadySolution, SignedSubmissionIndices, SignedSubmissionNextIndex, SignedSubmissionsMap, @@ -32,8 +34,8 @@ use sp_arithmetic::traits::SaturatedConversion; use sp_core::bounded::BoundedVec; use sp_npos_elections::ElectionScore; use sp_runtime::{ - traits::{Saturating, Zero}, - RuntimeDebug, + traits::{Convert, Saturating, Zero}, + FixedPointNumber, FixedPointOperand, FixedU128, Percent, RuntimeDebug, }; use sp_std::{ cmp::Ordering, @@ -348,6 +350,32 @@ impl SignedSubmissions { } } +/// Type that can be used to calculate the deposit base for signed submissions. +/// +/// The deposit base is calculated as a geometric progression based on the number of signed +/// submissions in the queue. The size of the queue represents the progression term. +pub struct GeometricDepositBase { + _marker: (PhantomData, PhantomData, PhantomData), +} + +impl Convert for GeometricDepositBase +where + Balance: FixedPointOperand, + Fixed: Get, + Inc: Get, +{ + // Calculates the base deposit as a geometric progression based on the number of signed + // submissions. + // + // The nth term is obtained by calculating `base * (1 + increase_factor)^nth`. Example: factor + // 5, with initial deposit of 1000 and 10% of increase factor is 1000 * (1 + 0.1)^5. + fn convert(queue_len: usize) -> Balance { + let increase_factor: FixedU128 = FixedU128::from_u32(1) + Inc::get().into(); + + increase_factor.saturating_pow(queue_len).saturating_mul_int(Fixed::get()) + } +} + impl Pallet { /// `Self` accessor for `SignedSubmission`. 
pub fn signed_submissions() -> SignedSubmissions { @@ -520,14 +548,14 @@ impl Pallet { size: SolutionOrSnapshotSize, ) -> BalanceOf { let encoded_len: u32 = raw_solution.encoded_size().saturated_into(); - let encoded_len: BalanceOf = encoded_len.into(); + let encoded_len_balance: BalanceOf = encoded_len.into(); let feasibility_weight = Self::solution_weight_of(raw_solution, size); - let len_deposit = T::SignedDepositByte::get().saturating_mul(encoded_len); + let len_deposit = T::SignedDepositByte::get().saturating_mul(encoded_len_balance); let weight_deposit = T::SignedDepositWeight::get() .saturating_mul(feasibility_weight.ref_time().saturated_into()); - T::SignedDepositBase::get() + T::SignedDepositBase::convert(Self::signed_submissions().len()) .saturating_add(len_deposit) .saturating_add(weight_deposit) } @@ -541,6 +569,7 @@ mod tests { Phase, }; use frame_support::{assert_noop, assert_ok, assert_storage_noop}; + use sp_runtime::Percent; #[test] fn cannot_submit_too_early() { @@ -779,6 +808,56 @@ mod tests { }) } + #[test] + fn geometric_deposit_queue_size_works() { + let constant = vec![1000; 10]; + // geometric progression with 10% increase in each iteration for 10 terms. + let progression_10 = vec![1000, 1100, 1210, 1331, 1464, 1610, 1771, 1948, 2143, 2357]; + let progression_40 = vec![1000, 1400, 1960, 2744, 3841, 5378, 7529, 10541, 14757, 20661]; + + let check_progressive_base_fee = |expected: &Vec| { + for s in 0..SignedMaxSubmissions::get() { + let account = 99 + s as u64; + Balances::make_free_balance_be(&account, 10000000); + let mut solution = raw_solution(); + solution.score.minimal_stake -= s as u128; + + assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(account), Box::new(solution))); + assert_eq!(balances(&account).1, expected[s as usize]) + } + }; + + ExtBuilder::default() + .signed_max_submission(10) + .signed_base_deposit(1000, true, Percent::from_percent(0)) + .build_and_execute(|| { + roll_to_signed(); + assert!(MultiPhase::current_phase().is_signed()); + + check_progressive_base_fee(&constant); + }); + + ExtBuilder::default() + .signed_max_submission(10) + .signed_base_deposit(1000, true, Percent::from_percent(10)) + .build_and_execute(|| { + roll_to_signed(); + assert!(MultiPhase::current_phase().is_signed()); + + check_progressive_base_fee(&progression_10); + }); + + ExtBuilder::default() + .signed_max_submission(10) + .signed_base_deposit(1000, true, Percent::from_percent(40)) + .build_and_execute(|| { + roll_to_signed(); + assert!(MultiPhase::current_phase().is_signed()); + + check_progressive_base_fee(&progression_40); + }); + } + #[test] fn call_fee_refund_is_limited_by_signed_max_refunds() { ExtBuilder::default().build_and_execute(|| { diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs index 501f9f89ab7a..ec646c311978 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs @@ -32,7 +32,7 @@ use sp_runtime::{ }, testing, traits::Zero, - transaction_validity, BuildStorage, PerU16, Perbill, + transaction_validity, BuildStorage, PerU16, Perbill, Percent, }; use sp_staking::{ offence::{DisableStrategy, OffenceDetails, OnOffenceHandler}, @@ -47,7 +47,8 @@ use frame_election_provider_support::{ SequentialPhragmen, Weight, }; use pallet_election_provider_multi_phase::{ - unsigned::MinerConfig, Call, ElectionCompute, QueuedSolution, 
SolutionAccuracyOf, + unsigned::MinerConfig, Call, ElectionCompute, GeometricDepositBase, QueuedSolution, + SolutionAccuracyOf, }; use pallet_staking::StakerStatus; use parking_lot::RwLock; @@ -182,6 +183,9 @@ parameter_types! { pub static TransactionPriority: transaction_validity::TransactionPriority = 1; #[derive(Debug)] pub static MaxWinners: u32 = 100; + pub static MaxVotesPerVoter: u32 = 16; + pub static SignedFixedDeposit: Balance = 1; + pub static SignedDepositIncreaseFactor: Percent = Percent::from_percent(10); pub static ElectionBounds: frame_election_provider_support::bounds::ElectionBounds = ElectionBoundsBuilder::default() .voters_count(1_000.into()).targets_count(1_000.into()).build(); } @@ -199,7 +203,8 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type MinerConfig = Self; type SignedMaxSubmissions = ConstU32<10>; type SignedRewardBase = (); - type SignedDepositBase = (); + type SignedDepositBase = + GeometricDepositBase; type SignedDepositByte = (); type SignedMaxRefunds = ConstU32<3>; type SignedDepositWeight = (); From e05d3690bc6c2c1f5abc05a378753eda9e0fdaec Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Sep 2023 11:51:48 +0200 Subject: [PATCH 07/16] Bump docker/setup-buildx-action from 2.1.0 to 3.0.0 (#1551) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [docker/setup-buildx-action](https://github.com/docker/setup-buildx-action) from 2.1.0 to 3.0.0.
Release notes (sourced from docker/setup-buildx-action's releases):

v3.0.0

Full Changelog: https://github.com/docker/setup-buildx-action/compare/v2.10.0...v3.0.0

v2.10.0

What's Changed

Full Changelog: https://github.com/docker/setup-buildx-action/compare/v2.9.1...v2.10.0

v2.9.1

Full Changelog: https://github.com/docker/setup-buildx-action/compare/v2.9.0...v2.9.1

v2.9.0

  • Bump @​docker/actions-toolkit from 0.6.0 to 0.7.0 in docker/setup-buildx-action#246
    • Adds support to cache Buildx binary to hosted tool cache and GHA cache backend

Full Changelog: https://github.com/docker/setup-buildx-action/compare/v2.8.0...v2.9.0

v2.8.0

Full Changelog: https://github.com/docker/setup-buildx-action/compare/v2.7.0...v2.8.0

v2.7.0

Full Changelog: https://github.com/docker/setup-buildx-action/compare/v2.6.0...v2.7.0

v2.6.0

Full Changelog: https://github.com/docker/setup-buildx-action/compare/v2.5.0...v2.6.0

v2.5.0

Full Changelog: https://github.com/docker/setup-buildx-action/compare/v2.4.1...v2.5.0

v2.4.1

... (truncated)

Commits
  • f95db51 Merge pull request #267 from docker/dependabot/npm_and_yarn/actions/core-1.10.1
  • 998a87c chore: update generated content
  • 28bae59 build(deps): bump @​actions/core from 1.10.0 to 1.10.1
  • c215341 Merge pull request #264 from crazy-max/update-node20
  • 02e9319 chore: node 20 as default runtime
  • 5c9160e chore: update generated content
  • 1283140 chore: fix author in package.json
  • c6afe06 vendor: bump @​docker/actions-toolkit from 0.10.0 to 0.12.0
  • f35e0d5 chore: update dev dependencies
  • baeb468 dev: remove unneeded binaries
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=docker/setup-buildx-action&package-manager=github_actions&previous-version=2.1.0&new-version=3.0.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.

[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)

---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:

- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/release-50_publish-docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-50_publish-docker.yml b/.github/workflows/release-50_publish-docker.yml index 04b3ebd3e79c..5cfe53e641df 100644 --- a/.github/workflows/release-50_publish-docker.yml +++ b/.github/workflows/release-50_publish-docker.yml @@ -242,7 +242,7 @@ jobs: uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@95cb08cb2672c73d4ffd2f422e6d11953d2a9c70 # v2.1.0 + uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0 - name: Cache Docker layers uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 From 519a0f0688d8740c7944bdb7986a540cbf7e3917 Mon Sep 17 00:00:00 2001 From: Chevdor Date: Mon, 18 Sep 2023 11:56:35 +0200 Subject: [PATCH 08/16] Replace secrets with the new ones (#1564) In the monorepo, secrets used in the various previous repos have been renamed into: - `CUMULUS_DOCKERHUB_USERNAME` - `CUMULUS_DOCKERHUB_TOKEN` - `POLKADOT_DOCKERHUB_USERNAME` - `POLKADOT_DOCKERHUB_TOKEN` This PR makes those changes and remove one of the GHW that has now been updated for the monorepo. --- .../workflows/release-50_publish-docker.yml | 8 ++-- .../release-50_publish-docker-release.yml | 44 ------------------- 2 files changed, 4 insertions(+), 48 deletions(-) delete mode 100644 polkadot/.github/workflows/release-50_publish-docker-release.yml diff --git a/.github/workflows/release-50_publish-docker.yml b/.github/workflows/release-50_publish-docker.yml index 5cfe53e641df..535f54abc684 100644 --- a/.github/workflows/release-50_publish-docker.yml +++ b/.github/workflows/release-50_publish-docker.yml @@ -198,8 +198,8 @@ jobs: - name: Login to Dockerhub uses: docker/login-action@v2 with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} + username: ${{ secrets.CUMULUS_DOCKERHUB_USERNAME }} + password: ${{ secrets.CUMULUS_DOCKERHUB_TOKEN }} - name: Push Container image for ${{ env.BINARY }} id: docker_push @@ -255,8 +255,8 @@ jobs: - name: Login to Docker Hub uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0 with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} + username: ${{ secrets.POLKADOT_DOCKERHUB_USERNAME }} + password: ${{ secrets.POLKADOT_DOCKERHUB_TOKEN }} - name: Fetch values id: fetch-data diff --git a/polkadot/.github/workflows/release-50_publish-docker-release.yml b/polkadot/.github/workflows/release-50_publish-docker-release.yml deleted file mode 100644 index 81e5caa718f3..000000000000 --- a/polkadot/.github/workflows/release-50_publish-docker-release.yml +++ /dev/null @@ -1,44 +0,0 @@ -name: Release - Publish Docker image for new releases - -on: - release: - types: - - published - -jobs: - main: - runs-on: ubuntu-latest - steps: - - name: Checkout sources - uses: actions/checkout@v3 - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@95cb08cb2672c73d4ffd2f422e6d11953d2a9c70 # v2.1.0 - - name: Cache Docker layers - uses: actions/cache@v3 - with: - path: /tmp/.buildx-cache - key: ${{ runner.os }}-buildx-${{ github.sha }} - restore-keys: | - ${{ runner.os }}-buildx- - - name: Login to Dockerhub - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ 
secrets.DOCKERHUB_TOKEN }} - - name: Build and push - id: docker_build - uses: docker/build-push-action@v4 - with: - push: true - file: scripts/ci/dockerfiles/polkadot/polkadot_injected_debian.Dockerfile - tags: | - parity/polkadot:latest - parity/polkadot:${{ github.event.release.tag_name }} - build-args: | - POLKADOT_VERSION=${{ github.event.release.tag_name }} - VCS_REF=${{ github.ref }} - BUILD_DATE=${{ github.event.release.published_at }} - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache - - name: Image digest - run: echo ${{ steps.docker_build.outputs.digest }} From f14bf347e8c89744ff6b4b8e76548d1137dc9c34 Mon Sep 17 00:00:00 2001 From: Sergej Sakac <73715684+Szegoo@users.noreply.github.com> Date: Mon, 18 Sep 2023 12:02:15 +0200 Subject: [PATCH 09/16] Broker pallet: `RegionDropped` event fix & additional tests (#1609) This PR includes the following fix: - [x] The `duration` is always set to zero in the `RegionDropped` event. This is fixed in this PR. Also added some additional tests to cover some cases that aren't covered : - [x] Selling a partitioned region to the instantaneous coretime pool. - [x] Partitioning a region after assigning it to a particular task. - [x] Interlacing a region after assigning it to a particular task. --- .../frame/broker/src/dispatchable_impls.rs | 4 +- substrate/frame/broker/src/tests.rs | 119 +++++++++++++++++- substrate/frame/broker/src/utility_impls.rs | 2 +- 3 files changed, 119 insertions(+), 6 deletions(-) diff --git a/substrate/frame/broker/src/dispatchable_impls.rs b/substrate/frame/broker/src/dispatchable_impls.rs index 54cf5d71dcad..0b08a7b665b7 100644 --- a/substrate/frame/broker/src/dispatchable_impls.rs +++ b/substrate/frame/broker/src/dispatchable_impls.rs @@ -76,7 +76,7 @@ impl Pallet { last_timeslice: Self::current_timeslice(), }; let now = frame_system::Pallet::::block_number(); - let dummy_sale = SaleInfoRecord { + let new_sale = SaleInfoRecord { sale_start: now, leadin_length: Zero::zero(), price, @@ -89,7 +89,7 @@ impl Pallet { cores_sold: 0, }; Self::deposit_event(Event::::SalesStarted { price, core_count }); - Self::rotate_sale(dummy_sale, &config, &status); + Self::rotate_sale(new_sale, &config, &status); Status::::put(&status); Ok(()) } diff --git a/substrate/frame/broker/src/tests.rs b/substrate/frame/broker/src/tests.rs index 3c326010dddf..e1b489bbe6e6 100644 --- a/substrate/frame/broker/src/tests.rs +++ b/substrate/frame/broker/src/tests.rs @@ -77,7 +77,9 @@ fn drop_renewal_works() { let e = Error::::StillValid; assert_noop!(Broker::do_drop_renewal(region.core, region.begin + 3), e); advance_to(12); + assert_eq!(AllowedRenewals::::iter().count(), 1); assert_ok!(Broker::do_drop_renewal(region.core, region.begin + 3)); + assert_eq!(AllowedRenewals::::iter().count(), 0); let e = Error::::UnknownRenewal; assert_noop!(Broker::do_drop_renewal(region.core, region.begin + 3), e); }); @@ -90,7 +92,10 @@ fn drop_contribution_works() { advance_to(2); let region = Broker::do_purchase(1, u64::max_value()).unwrap(); // Place region in pool. Active in pool timeslices 4, 5, 6 = rcblocks 8, 10, 12; we - // expect the contribution record to timeout 3 timeslices following 7 = 10 + // expect the contribution record to timeout 3 timeslices following 7 = 14 + // + // Due to the contribution_timeout being configured for 3 timeslices, the contribution + // can only be discarded at timeslice 10, i.e. rcblock 20. 
assert_ok!(Broker::do_pool(region, Some(1), 1, Final)); assert_eq!(InstaPoolContribution::::iter().count(), 1); advance_to(19); @@ -378,6 +383,41 @@ fn instapool_partial_core_payouts_work() { }); } +#[test] +fn instapool_core_payouts_work_with_partitioned_region() { + TestExt::new().endow(1, 1000).execute_with(|| { + assert_ok!(Broker::do_start_sales(100, 1)); + advance_to(2); + let region = Broker::do_purchase(1, u64::max_value()).unwrap(); + let (region1, region2) = Broker::do_partition(region, None, 2).unwrap(); + // `region1` duration is from rcblock 8 to rcblock 12. This means that the + // coretime purchased during this time period will be purchased from `region1` + // + // `region2` duration is from rcblock 12 to rcblock 14 and during this period + // coretime will be purchased from `region2`. + assert_ok!(Broker::do_pool(region1, None, 2, Final)); + assert_ok!(Broker::do_pool(region2, None, 3, Final)); + assert_ok!(Broker::do_purchase_credit(1, 20, 1)); + advance_to(8); + assert_ok!(TestCoretimeProvider::spend_instantaneous(1, 10)); + advance_to(11); + assert_eq!(pot(), 20); + assert_eq!(revenue(), 100); + assert_ok!(Broker::do_claim_revenue(region1, 100)); + assert_eq!(pot(), 10); + assert_eq!(balance(2), 10); + advance_to(12); + assert_ok!(TestCoretimeProvider::spend_instantaneous(1, 10)); + advance_to(15); + assert_eq!(pot(), 10); + assert_ok!(Broker::do_claim_revenue(region2, 100)); + assert_eq!(pot(), 0); + // The balance of account `2` remains unchanged. + assert_eq!(balance(2), 10); + assert_eq!(balance(3), 10); + }); +} + #[test] fn initialize_with_system_paras_works() { TestExt::new().execute_with(|| { @@ -654,6 +694,79 @@ fn partition_then_interlace_works() { }); } +#[test] +fn partitioning_after_assignment_works() { + TestExt::new().endow(1, 1000).execute_with(|| { + assert_ok!(Broker::do_start_sales(100, 1)); + advance_to(2); + // We will initially allocate a task to a purchased region, and after that + // we will proceed to partition the region. + let region = Broker::do_purchase(1, u64::max_value()).unwrap(); + assert_ok!(Broker::do_assign(region, None, 1001, Provisional)); + let (_region, region1) = Broker::do_partition(region, None, 2).unwrap(); + // After the partitioning if we assign a new task to `region` the other region + // will still be assigned to `Task(1001)`. + assert_ok!(Broker::do_assign(region1, None, 1002, Provisional)); + advance_to(10); + assert_eq!( + CoretimeTrace::get(), + vec![ + ( + 6, + AssignCore { + core: 0, + begin: 8, + assignment: vec![(Task(1001), 57600),], + end_hint: None + } + ), + ( + 10, + AssignCore { + core: 0, + begin: 12, + assignment: vec![(Task(1002), 57600),], + end_hint: None + } + ), + ] + ); + }); +} + +#[test] +fn interlacing_after_assignment_works() { + TestExt::new().endow(1, 1000).execute_with(|| { + assert_ok!(Broker::do_start_sales(100, 1)); + advance_to(2); + // We will initially allocate a task to a purchased region, and after that + // we will proceed to interlace the region. + let region = Broker::do_purchase(1, u64::max_value()).unwrap(); + assert_ok!(Broker::do_assign(region, None, 1001, Provisional)); + let (region1, _region) = + Broker::do_interlace(region, None, CoreMask::from_chunk(0, 40)).unwrap(); + // Interlacing the region won't affect the assignment. The entire region will still + // be assigned to `Task(1001)`. + // + // However, after we assign a task to `region1` the `_region` won't be assigned + // to `Task(1001)` anymore. It will become idle. 
+ assert_ok!(Broker::do_assign(region1, None, 1002, Provisional)); + advance_to(10); + assert_eq!( + CoretimeTrace::get(), + vec![( + 6, + AssignCore { + core: 0, + begin: 8, + assignment: vec![(Idle, 28800), (Task(1002), 28800)], + end_hint: None + } + ),] + ); + }); +} + #[test] fn reservations_are_limited() { TestExt::new().execute_with(|| { @@ -866,7 +979,7 @@ fn assign_should_drop_invalid_region() { advance_to(10); assert_ok!(Broker::do_assign(region, Some(1), 1001, Provisional)); region.begin = 7; - System::assert_last_event(Event::RegionDropped { region_id: region, duration: 0 }.into()); + System::assert_last_event(Event::RegionDropped { region_id: region, duration: 3 }.into()); }); } @@ -879,7 +992,7 @@ fn pool_should_drop_invalid_region() { advance_to(10); assert_ok!(Broker::do_pool(region, Some(1), 1001, Provisional)); region.begin = 7; - System::assert_last_event(Event::RegionDropped { region_id: region, duration: 0 }.into()); + System::assert_last_event(Event::RegionDropped { region_id: region, duration: 3 }.into()); }); } diff --git a/substrate/frame/broker/src/utility_impls.rs b/substrate/frame/broker/src/utility_impls.rs index 99c4de32f776..2450198050b6 100644 --- a/substrate/frame/broker/src/utility_impls.rs +++ b/substrate/frame/broker/src/utility_impls.rs @@ -101,9 +101,9 @@ impl Pallet { let last_committed_timeslice = status.last_committed_timeslice; if region_id.begin <= last_committed_timeslice { + let duration = region.end.saturating_sub(region_id.begin); region_id.begin = last_committed_timeslice + 1; if region_id.begin >= region.end { - let duration = region.end.saturating_sub(region_id.begin); Self::deposit_event(Event::RegionDropped { region_id, duration }); return Ok(None) } From a50e6ba7af50a4be9ae78ebb90e86a61f3dd85e1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Sep 2023 12:18:49 +0200 Subject: [PATCH 10/16] Bump docker/login-action from 2 to 3 (#1531) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [docker/login-action](https://github.com/docker/login-action) from 2 to 3.
Release notes

Sourced from docker/login-action's releases.

v3.0.0

Full Changelog: https://github.com/docker/login-action/compare/v2.2.0...v3.0.0

v2.2.0

Full Changelog: https://github.com/docker/login-action/compare/v2.1.0...v2.2.0

v2.1.0

  • Ensure AWS temp credentials are redacted in workflow logs by @crazy-max (#275)
  • Bump @actions/core from 1.6.0 to 1.10.0 (#252 #292)
  • Bump @aws-sdk/client-ecr from 3.53.0 to 3.186.0 (#298)
  • Bump @aws-sdk/client-ecr-public from 3.53.0 to 3.186.0 (#299)

Full Changelog: https://github.com/docker/login-action/compare/v2.0.0...v2.1.0

Commits
  • 343f7c4 Merge pull request #599 from docker/dependabot/npm_and_yarn/aws-sdk-dependenc...
  • aad0f97 chore: update generated content
  • 2e0cd39 build(deps): bump the aws-sdk-dependencies group with 2 updates
  • 203bc9c Merge pull request #588 from docker/dependabot/npm_and_yarn/proxy-agent-depen...
  • 2199648 chore: update generated content
  • b489376 build(deps): bump the proxy-agent-dependencies group with 1 update
  • 7c309e7 Merge pull request #598 from docker/dependabot/npm_and_yarn/actions/core-1.10.1
  • 0ccf222 chore: update generated content
  • 56d703e Merge pull request #597 from docker/dependabot/github_actions/aws-actions/con...
  • 24d3b35 build(deps): bump @actions/core from 1.10.0 to 1.10.1
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=docker/login-action&package-manager=github_actions&previous-version=2&new-version=3)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Sergejs Kostjucenko <85877331+sergejparity@users.noreply.github.com> --- .github/workflows/release-50_publish-docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-50_publish-docker.yml b/.github/workflows/release-50_publish-docker.yml index 535f54abc684..e787d83616f9 100644 --- a/.github/workflows/release-50_publish-docker.yml +++ b/.github/workflows/release-50_publish-docker.yml @@ -196,7 +196,7 @@ jobs: ./docker/scripts/build-injected.sh - name: Login to Dockerhub - uses: docker/login-action@v2 + uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0 with: username: ${{ secrets.CUMULUS_DOCKERHUB_USERNAME }} password: ${{ secrets.CUMULUS_DOCKERHUB_TOKEN }} From 20052e1675f96fb25d5b19c26a4e1d5e7425e724 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Sep 2023 12:37:38 +0200 Subject: [PATCH 11/16] Bump docker/build-push-action from 4 to 5 (#1552) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 4 to 5.
Release notes

Sourced from docker/build-push-action's releases.

v5.0.0

Full Changelog: https://github.com/docker/build-push-action/compare/v4.2.1...v5.0.0

v4.2.1

Note

Buildx v0.10 enables support for a minimal SLSA Provenance attestation, which requires support for OCI-compliant multi-platform images. This may introduce issues with registry and runtime support (e.g. Google Cloud Run and AWS Lambda). You can optionally disable the default provenance attestation functionality using provenance: false.

Full Changelog: https://github.com/docker/build-push-action/compare/v4.2.0...v4.2.1

v4.2.0

Note

Buildx v0.10 enables support for a minimal SLSA Provenance attestation, which requires support for OCI-compliant multi-platform images. This may introduce issues with registry and runtime support (e.g. Google Cloud Run and AWS Lambda). You can optionally disable the default provenance attestation functionality using provenance: false.

Full Changelog: https://github.com/docker/build-push-action/compare/v4.1.1...v4.2.0

v4.1.1

Note

Buildx v0.10 enables support for a minimal SLSA Provenance attestation, which requires support for OCI-compliant multi-platform images. This may introduce issues with registry and runtime support (e.g. Google Cloud Run and AWS Lambda). You can optionally disable the default provenance attestation functionality using provenance: false.

Full Changelog: https://github.com/docker/build-push-action/compare/v4.1.0...v4.1.1

v4.1.0

Note

Buildx v0.10 enables support for a minimal SLSA Provenance attestation, which requires support for OCI-compliant multi-platform images. This may introduce issues with registry and runtime support (e.g. Google Cloud Run and AWS Lambda). You can optionally disable the default provenance attestation functionality using provenance: false.

Full Changelog: https://github.com/docker/build-push-action/compare/v4.0.0...v4.1.0
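The notes above repeatedly mention that the default SLSA provenance attestation introduced with Buildx v0.10 can be switched off through the action's `provenance` input. As a minimal sketch only (the workflow name, image reference and step layout are placeholders for illustration, not taken from this repository's workflows), opting out would look roughly like:

```yaml
name: docker-build-example            # hypothetical workflow, for illustration only
on: workflow_dispatch
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: docker/setup-buildx-action@v3
      - name: Build image without provenance attestation
        uses: docker/build-push-action@v5
        with:
          push: false                  # build only, so the sketch needs no registry login
          tags: example/image:latest   # placeholder image reference
          provenance: false            # opt out of the default SLSA provenance attestation
```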

Commits
  • 0565240 Merge pull request #959 from docker/dependabot/npm_and_yarn/actions/core-1.10.1
  • 3ab07f8 chore: update generated content
  • b9e7e4d chore(deps): Bump @actions/core from 1.10.0 to 1.10.1
  • 04d1a3b Merge pull request #954 from crazy-max/update-node20
  • 1a4d1a1 chore: node 20 as default runtime
  • 675965c chore: update generated content
  • 58ee34c chore: fix author in package.json
  • c97c406 fix ProxyConfig type when checking length
  • 47d5369 vendor: bump @docker/actions-toolkit from 0.8.0 to 0.12.0
  • 8895c74 chore: update dev dependencies
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=docker/build-push-action&package-manager=github_actions&previous-version=4&new-version=5)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Chevdor --- .github/workflows/release-50_publish-docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-50_publish-docker.yml b/.github/workflows/release-50_publish-docker.yml index e787d83616f9..d01d78631e19 100644 --- a/.github/workflows/release-50_publish-docker.yml +++ b/.github/workflows/release-50_publish-docker.yml @@ -266,7 +266,7 @@ jobs: - name: Build and push id: docker_build - uses: docker/build-push-action@v4 + uses: docker/build-push-action@0565240e2d4ab88bba5387d719585280857ece09 # v5.0.0 with: push: true file: docker/dockerfiles/polkadot/polkadot_injected_debian.Dockerfile From 5d346643ca0d1b6d9ddcfb2d01e490858ce4cb0e Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Mon, 18 Sep 2023 13:54:44 +0300 Subject: [PATCH 12/16] chainHead: Add support for storage closest merkle descendant #14818 (#1153) This PR adds support for fetching the closest merkle value of some key. Builds on top of - https://github.com/paritytech/trie/pull/199 Migrates https://github.com/paritytech/substrate/pull/14818 to the monorepo. Closes: https://github.com/paritytech/substrate/issues/14550 Closes: https://github.com/paritytech/polkadot-sdk/issues/1506 // @paritytech/subxt-team --------- Signed-off-by: Alexandru Vasile Co-authored-by: Sebastian Kunert --- Cargo.lock | 1 + substrate/client/api/Cargo.toml | 1 + substrate/client/api/src/backend.rs | 16 ++ substrate/client/db/src/bench.rs | 23 ++- substrate/client/db/src/lib.rs | 17 +- substrate/client/db/src/record_stats_state.rs | 16 ++ .../rpc-spec-v2/src/chain_head/chain_head.rs | 17 +- .../src/chain_head/chain_head_storage.rs | 40 +++- .../rpc-spec-v2/src/chain_head/test_utils.rs | 21 +- .../rpc-spec-v2/src/chain_head/tests.rs | 190 +++++++++++++++++- substrate/client/service/src/client/client.rs | 23 ++- .../primitives/state-machine/src/backend.rs | 14 +- .../state-machine/src/trie_backend.rs | 14 +- .../state-machine/src/trie_backend_essence.rs | 42 +++- substrate/primitives/trie/src/lib.rs | 42 +++- 15 files changed, 449 insertions(+), 28 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f09db7643e08..ea4cf4a1817e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14641,6 +14641,7 @@ dependencies = [ "sp-statement-store", "sp-storage", "sp-test-primitives", + "sp-trie", "substrate-prometheus-endpoint", "substrate-test-runtime", "thiserror", diff --git a/substrate/client/api/Cargo.toml b/substrate/client/api/Cargo.toml index b59149424ed3..2b64c86038dd 100644 --- a/substrate/client/api/Cargo.toml +++ b/substrate/client/api/Cargo.toml @@ -35,6 +35,7 @@ sp-runtime = { path = "../../primitives/runtime", default-features = false} sp-state-machine = { path = "../../primitives/state-machine" } sp-statement-store = { path = "../../primitives/statement-store" } sp-storage = { path = "../../primitives/storage" } +sp-trie = { path = "../../primitives/trie" } [dev-dependencies] thiserror = "1.0.48" diff --git a/substrate/client/api/src/backend.rs b/substrate/client/api/src/backend.rs index 2d8fdef77cdb..31b100433c70 100644 --- a/substrate/client/api/src/backend.rs +++ b/substrate/client/api/src/backend.rs @@ -33,6 +33,7 @@ use sp_state_machine::{ OffchainChangesCollection, StorageCollection, StorageIterator, }; use sp_storage::{ChildInfo, StorageData, StorageKey}; +pub use sp_trie::MerkleValue; use 
crate::{blockchain::Backend as BlockchainBackend, UsageInfo}; @@ -470,6 +471,21 @@ pub trait StorageProvider> { child_info: &ChildInfo, key: &StorageKey, ) -> sp_blockchain::Result>; + + /// Given a block's `Hash` and a key, return the closest merkle value. + fn closest_merkle_value( + &self, + hash: Block::Hash, + key: &StorageKey, + ) -> sp_blockchain::Result>>; + + /// Given a block's `Hash`, a key and a child storage key, return the closest merkle value. + fn child_closest_merkle_value( + &self, + hash: Block::Hash, + child_info: &ChildInfo, + key: &StorageKey, + ) -> sp_blockchain::Result>>; } /// Client backend. diff --git a/substrate/client/db/src/bench.rs b/substrate/client/db/src/bench.rs index 38c37a42ede7..03ad4817b53b 100644 --- a/substrate/client/db/src/bench.rs +++ b/substrate/client/db/src/bench.rs @@ -37,7 +37,7 @@ use sp_state_machine::{ }; use sp_trie::{ cache::{CacheSize, SharedTrieCache}, - prefixed_key, MemoryDB, + prefixed_key, MemoryDB, MerkleValue, }; use std::{ cell::{Cell, RefCell}, @@ -382,6 +382,27 @@ impl StateBackend> for BenchmarkingState { .child_storage_hash(child_info, key) } + fn closest_merkle_value( + &self, + key: &[u8], + ) -> Result>, Self::Error> { + self.add_read_key(None, key); + self.state.borrow().as_ref().ok_or_else(state_err)?.closest_merkle_value(key) + } + + fn child_closest_merkle_value( + &self, + child_info: &ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { + self.add_read_key(None, key); + self.state + .borrow() + .as_ref() + .ok_or_else(state_err)? + .child_closest_merkle_value(child_info, key) + } + fn exists_storage(&self, key: &[u8]) -> Result { self.add_read_key(None, key); self.state.borrow().as_ref().ok_or_else(state_err)?.exists_storage(key) diff --git a/substrate/client/db/src/lib.rs b/substrate/client/db/src/lib.rs index 73fb4f8ce6db..194bec8a88eb 100644 --- a/substrate/client/db/src/lib.rs +++ b/substrate/client/db/src/lib.rs @@ -90,7 +90,7 @@ use sp_state_machine::{ OffchainChangesCollection, StateMachineStats, StorageCollection, StorageIterator, StorageKey, StorageValue, UsageInfo as StateUsageInfo, }; -use sp_trie::{cache::SharedTrieCache, prefixed_key, MemoryDB, PrefixedMemoryDB}; +use sp_trie::{cache::SharedTrieCache, prefixed_key, MemoryDB, MerkleValue, PrefixedMemoryDB}; // Re-export the Database trait so that one can pass an implementation of it. pub use sc_state_db::PruningMode; @@ -214,6 +214,21 @@ impl StateBackend> for RefTrackingState { self.state.child_storage_hash(child_info, key) } + fn closest_merkle_value( + &self, + key: &[u8], + ) -> Result>, Self::Error> { + self.state.closest_merkle_value(key) + } + + fn child_closest_merkle_value( + &self, + child_info: &ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { + self.state.child_closest_merkle_value(child_info, key) + } + fn exists_storage(&self, key: &[u8]) -> Result { self.state.exists_storage(key) } diff --git a/substrate/client/db/src/record_stats_state.rs b/substrate/client/db/src/record_stats_state.rs index 29ece84f97e5..d9a35c075d79 100644 --- a/substrate/client/db/src/record_stats_state.rs +++ b/substrate/client/db/src/record_stats_state.rs @@ -28,6 +28,7 @@ use sp_state_machine::{ backend::{AsTrieBackend, Backend as StateBackend}, BackendTransaction, IterArgs, StorageIterator, StorageKey, StorageValue, TrieBackend, }; +use sp_trie::MerkleValue; use std::sync::Arc; /// State abstraction for recording stats about state access. 
@@ -144,6 +145,21 @@ impl>, B: BlockT> StateBackend> self.state.child_storage_hash(child_info, key) } + fn closest_merkle_value( + &self, + key: &[u8], + ) -> Result>, Self::Error> { + self.state.closest_merkle_value(key) + } + + fn child_closest_merkle_value( + &self, + child_info: &ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { + self.state.child_closest_merkle_value(child_info, key) + } + fn exists_storage(&self, key: &[u8]) -> Result { self.state.exists_storage(key) } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs index 14364c331e6c..a8c1c4f7e083 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs @@ -27,7 +27,7 @@ use crate::{ api::ChainHeadApiServer, chain_head_follow::ChainHeadFollower, error::Error as ChainHeadRpcError, - event::{FollowEvent, MethodResponse, OperationError, StorageQuery, StorageQueryType}, + event::{FollowEvent, MethodResponse, OperationError, StorageQuery}, hex_string, subscription::{SubscriptionManagement, SubscriptionManagementError}, }, @@ -329,19 +329,10 @@ where let items = items .into_iter() .map(|query| { - if query.query_type == StorageQueryType::ClosestDescendantMerkleValue { - // Note: remove this once all types are implemented. - return Err(ChainHeadRpcError::InvalidParam( - "Storage query type not supported".into(), - )) - } - - Ok(StorageQuery { - key: StorageKey(parse_hex_param(query.key)?), - query_type: query.query_type, - }) + let key = StorageKey(parse_hex_param(query.key)?); + Ok(StorageQuery { key, query_type: query.query_type }) }) - .collect::, _>>()?; + .collect::, ChainHeadRpcError>>()?; let child_trie = child_trie .map(|child_trie| parse_hex_param(child_trie)) diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs index 48a673f47e3b..7095548a2b16 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs @@ -145,6 +145,36 @@ where .unwrap_or_else(|error| QueryResult::Err(error.to_string())) } + /// Fetch the closest merkle value. + fn query_storage_merkle_value( + &self, + hash: Block::Hash, + key: &StorageKey, + child_key: Option<&ChildInfo>, + ) -> QueryResult { + let result = if let Some(child_key) = child_key { + self.client.child_closest_merkle_value(hash, child_key, key) + } else { + self.client.closest_merkle_value(hash, key) + }; + + result + .map(|opt| { + QueryResult::Ok(opt.map(|storage_data| { + let result = match &storage_data { + sc_client_api::MerkleValue::Node(data) => hex_string(&data.as_slice()), + sc_client_api::MerkleValue::Hash(hash) => hex_string(&hash.as_ref()), + }; + + StorageResult { + key: hex_string(&key.0), + result: StorageResultType::ClosestDescendantMerkleValue(result), + } + })) + }) + .unwrap_or_else(|error| QueryResult::Err(error.to_string())) + } + /// Iterate over at most `operation_max_storage_items` keys. /// /// Returns the storage result with a potential next key to resume iteration. 
@@ -286,13 +316,21 @@ where return }, }, + StorageQueryType::ClosestDescendantMerkleValue => + match self.query_storage_merkle_value(hash, &item.key, child_key.as_ref()) { + Ok(Some(value)) => storage_results.push(value), + Ok(None) => continue, + Err(error) => { + send_error::(&sender, operation.operation_id(), error); + return + }, + }, StorageQueryType::DescendantsValues => self .iter_operations .push_back(QueryIter { next_key: item.key, ty: IterQueryType::Value }), StorageQueryType::DescendantsHashes => self .iter_operations .push_back(QueryIter { next_key: item.key, ty: IterQueryType::Hash }), - _ => continue, }; } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs b/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs index 6e92e87608b4..a901f3039ffe 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs @@ -20,8 +20,8 @@ use parking_lot::Mutex; use sc_client_api::{ execution_extensions::ExecutionExtensions, BlockBackend, BlockImportNotification, BlockchainEvents, CallExecutor, ChildInfo, ExecutorProvider, FinalityNotification, - FinalityNotifications, FinalizeSummary, ImportNotifications, KeysIter, PairsIter, StorageData, - StorageEventStream, StorageKey, StorageProvider, + FinalityNotifications, FinalizeSummary, ImportNotifications, KeysIter, MerkleValue, PairsIter, + StorageData, StorageEventStream, StorageKey, StorageProvider, }; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; use sp_api::{CallApiAt, CallApiAtParams, NumberFor, RuntimeVersion}; @@ -198,6 +198,23 @@ impl< ) -> sp_blockchain::Result> { self.client.child_storage_hash(hash, child_info, key) } + + fn closest_merkle_value( + &self, + hash: Block::Hash, + key: &StorageKey, + ) -> sp_blockchain::Result>> { + self.client.closest_merkle_value(hash, key) + } + + fn child_closest_merkle_value( + &self, + hash: Block::Hash, + child_info: &ChildInfo, + key: &StorageKey, + ) -> sp_blockchain::Result>> { + self.client.child_closest_merkle_value(hash, child_info, key) + } } impl> CallApiAt for ChainHeadMockClient { diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index 1336cff84b6f..3ab47991c4e5 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -43,7 +43,12 @@ use sp_core::{ Blake2Hasher, Hasher, }; use sp_version::RuntimeVersion; -use std::{collections::HashSet, fmt::Debug, sync::Arc, time::Duration}; +use std::{ + collections::{HashMap, HashSet}, + fmt::Debug, + sync::Arc, + time::Duration, +}; use substrate_test_runtime::Transfer; use substrate_test_runtime_client::{ prelude::*, runtime, runtime::RuntimeApi, Backend, BlockBuilderExt, Client, @@ -2583,3 +2588,186 @@ async fn stop_storage_operation() { ) .await; } + +#[tokio::test] +async fn storage_closest_merkle_value() { + let (mut client, api, mut sub, sub_id, _) = setup_api().await; + + /// The core of this test. + /// + /// Checks keys that are exact match, keys with descedant and keys that should not return + /// values. + /// + /// Returns (key, merkle value) pairs. + async fn expect_merkle_request( + api: &RpcModule>>, + mut sub: &mut RpcSubscription, + sub_id: String, + block_hash: String, + ) -> HashMap { + // Valid call with storage at the keys. 
+ let response: MethodResponse = api + .call( + "chainHead_unstable_storage", + rpc_params![ + &sub_id, + &block_hash, + vec![ + StorageQuery { + key: hex_string(b":AAAA"), + query_type: StorageQueryType::ClosestDescendantMerkleValue + }, + StorageQuery { + key: hex_string(b":AAAB"), + query_type: StorageQueryType::ClosestDescendantMerkleValue + }, + // Key with descedent. + StorageQuery { + key: hex_string(b":A"), + query_type: StorageQueryType::ClosestDescendantMerkleValue + }, + StorageQuery { + key: hex_string(b":AA"), + query_type: StorageQueryType::ClosestDescendantMerkleValue + }, + // Keys below this comment do not produce a result. + // Key that exceed the keyspace of the trie. + StorageQuery { + key: hex_string(b":AAAAX"), + query_type: StorageQueryType::ClosestDescendantMerkleValue + }, + StorageQuery { + key: hex_string(b":AAABX"), + query_type: StorageQueryType::ClosestDescendantMerkleValue + }, + // Key that are not part of the trie. + StorageQuery { + key: hex_string(b":AAX"), + query_type: StorageQueryType::ClosestDescendantMerkleValue + }, + StorageQuery { + key: hex_string(b":AAAX"), + query_type: StorageQueryType::ClosestDescendantMerkleValue + }, + ] + ], + ) + .await + .unwrap(); + let operation_id = match response { + MethodResponse::Started(started) => started.operation_id, + MethodResponse::LimitReached => panic!("Expected started response"), + }; + + let event = get_next_event::>(&mut sub).await; + let merkle_values: HashMap<_, _> = match event { + FollowEvent::OperationStorageItems(res) => { + assert_eq!(res.operation_id, operation_id); + + res.items + .into_iter() + .map(|res| { + let value = match res.result { + StorageResultType::ClosestDescendantMerkleValue(value) => value, + _ => panic!("Unexpected StorageResultType"), + }; + (res.key, value) + }) + .collect() + }, + _ => panic!("Expected OperationStorageItems event"), + }; + + // Finished. + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::OperationStorageDone(done) if done.operation_id == operation_id + ); + + // Response for AAAA, AAAB, A and AA. + assert_eq!(merkle_values.len(), 4); + + // While checking for expected merkle values to align, + // the following will check that the returned keys are + // expected. + + // Values for AAAA and AAAB are different. + assert_ne!( + merkle_values.get(&hex_string(b":AAAA")).unwrap(), + merkle_values.get(&hex_string(b":AAAB")).unwrap() + ); + + // Values for A and AA should be on the same branch node. + assert_eq!( + merkle_values.get(&hex_string(b":A")).unwrap(), + merkle_values.get(&hex_string(b":AA")).unwrap() + ); + // The branch node value must be different than the leaf of either + // AAAA and AAAB. + assert_ne!( + merkle_values.get(&hex_string(b":A")).unwrap(), + merkle_values.get(&hex_string(b":AAAA")).unwrap() + ); + assert_ne!( + merkle_values.get(&hex_string(b":A")).unwrap(), + merkle_values.get(&hex_string(b":AAAB")).unwrap() + ); + + merkle_values + } + + // Import a new block with storage changes. + let mut builder = client.new_block(Default::default()).unwrap(); + builder.push_storage_change(b":AAAA".to_vec(), Some(vec![1; 64])).unwrap(); + builder.push_storage_change(b":AAAB".to_vec(), Some(vec![2; 64])).unwrap(); + let block = builder.build().unwrap().block; + let block_hash = format!("{:?}", block.header.hash()); + client.import(BlockOrigin::Own, block.clone()).await.unwrap(); + + // Ensure the imported block is propagated and pinned for this subscription. 
+ assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::NewBlock(_) + ); + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::BestBlockChanged(_) + ); + + let merkle_values_lhs = expect_merkle_request(&api, &mut sub, sub_id.clone(), block_hash).await; + + // Import a new block with and change AAAB value. + let mut builder = client.new_block(Default::default()).unwrap(); + builder.push_storage_change(b":AAAA".to_vec(), Some(vec![1; 64])).unwrap(); + builder.push_storage_change(b":AAAB".to_vec(), Some(vec![3; 64])).unwrap(); + let block = builder.build().unwrap().block; + let block_hash = format!("{:?}", block.header.hash()); + client.import(BlockOrigin::Own, block.clone()).await.unwrap(); + + // Ensure the imported block is propagated and pinned for this subscription. + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::NewBlock(_) + ); + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::BestBlockChanged(_) + ); + + let merkle_values_rhs = expect_merkle_request(&api, &mut sub, sub_id.clone(), block_hash).await; + + // Change propagated to the root. + assert_ne!( + merkle_values_lhs.get(&hex_string(b":A")).unwrap(), + merkle_values_rhs.get(&hex_string(b":A")).unwrap() + ); + assert_ne!( + merkle_values_lhs.get(&hex_string(b":AAAB")).unwrap(), + merkle_values_rhs.get(&hex_string(b":AAAB")).unwrap() + ); + // However the AAAA branch leaf remains unchanged. + assert_eq!( + merkle_values_lhs.get(&hex_string(b":AAAA")).unwrap(), + merkle_values_rhs.get(&hex_string(b":AAAA")).unwrap() + ); +} diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index 09c1673884aa..26dcd0f9e21a 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ -78,7 +78,7 @@ use sp_state_machine::{ ChildStorageCollection, KeyValueStates, KeyValueStorageLevel, StorageCollection, MAX_NESTED_TRIE_DEPTH, }; -use sp_trie::{CompactProof, StorageProof}; +use sp_trie::{CompactProof, MerkleValue, StorageProof}; use std::{ collections::{HashMap, HashSet}, marker::PhantomData, @@ -1545,6 +1545,27 @@ where .child_storage_hash(child_info, &key.0) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e))) } + + fn closest_merkle_value( + &self, + hash: ::Hash, + key: &StorageKey, + ) -> blockchain::Result::Hash>>> { + self.state_at(hash)? + .closest_merkle_value(&key.0) + .map_err(|e| sp_blockchain::Error::from_state(Box::new(e))) + } + + fn child_closest_merkle_value( + &self, + hash: ::Hash, + child_info: &ChildInfo, + key: &StorageKey, + ) -> blockchain::Result::Hash>>> { + self.state_at(hash)? + .child_closest_merkle_value(child_info, &key.0) + .map_err(|e| sp_blockchain::Error::from_state(Box::new(e))) + } } impl HeaderMetadata for Client diff --git a/substrate/primitives/state-machine/src/backend.rs b/substrate/primitives/state-machine/src/backend.rs index 2a25bdc54d94..ea9cd442d70b 100644 --- a/substrate/primitives/state-machine/src/backend.rs +++ b/substrate/primitives/state-machine/src/backend.rs @@ -30,7 +30,7 @@ use sp_core::storage::{ChildInfo, StateVersion, TrackedStorageKey}; #[cfg(feature = "std")] use sp_core::traits::RuntimeCode; use sp_std::vec::Vec; -use sp_trie::PrefixedMemoryDB; +use sp_trie::{MerkleValue, PrefixedMemoryDB}; /// A struct containing arguments for iterating over the storage. #[derive(Default)] @@ -195,7 +195,17 @@ pub trait Backend: sp_std::fmt::Debug { /// Get keyed storage value hash or None if there is nothing associated. 
fn storage_hash(&self, key: &[u8]) -> Result, Self::Error>; - /// Get keyed child storage or None if there is nothing associated. + /// Get the merkle value or None if there is nothing associated. + fn closest_merkle_value(&self, key: &[u8]) -> Result>, Self::Error>; + + /// Get the child merkle value or None if there is nothing associated. + fn child_closest_merkle_value( + &self, + child_info: &ChildInfo, + key: &[u8], + ) -> Result>, Self::Error>; + + /// Get child keyed child storage or None if there is nothing associated. fn child_storage( &self, child_info: &ChildInfo, diff --git a/substrate/primitives/state-machine/src/trie_backend.rs b/substrate/primitives/state-machine/src/trie_backend.rs index cc7132181f90..afa80addd2ba 100644 --- a/substrate/primitives/state-machine/src/trie_backend.rs +++ b/substrate/primitives/state-machine/src/trie_backend.rs @@ -30,7 +30,6 @@ use codec::Codec; use hash_db::HashDB; use hash_db::Hasher; use sp_core::storage::{ChildInfo, StateVersion}; -use sp_trie::PrefixedMemoryDB; #[cfg(feature = "std")] use sp_trie::{ cache::{LocalTrieCache, TrieCache}, @@ -39,6 +38,7 @@ use sp_trie::{ }; #[cfg(not(feature = "std"))] use sp_trie::{Error, NodeCodec}; +use sp_trie::{MerkleValue, PrefixedMemoryDB}; use trie_db::TrieCache as TrieCacheT; #[cfg(not(feature = "std"))] use trie_db::{node::NodeOwned, CachedValue}; @@ -405,6 +405,18 @@ where self.essence.child_storage(child_info, key) } + fn closest_merkle_value(&self, key: &[u8]) -> Result>, Self::Error> { + self.essence.closest_merkle_value(key) + } + + fn child_closest_merkle_value( + &self, + child_info: &ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { + self.essence.child_closest_merkle_value(child_info, key) + } + fn next_storage_key(&self, key: &[u8]) -> Result, Self::Error> { let (is_cached, mut cache) = access_cache(&self.next_storage_key_cache, Option::take) .map(|cache| (cache.last_key == key, cache)) diff --git a/substrate/primitives/state-machine/src/trie_backend_essence.rs b/substrate/primitives/state-machine/src/trie_backend_essence.rs index 4bb51f4a1343..ad7aeab899c8 100644 --- a/substrate/primitives/state-machine/src/trie_backend_essence.rs +++ b/substrate/primitives/state-machine/src/trie_backend_essence.rs @@ -32,11 +32,12 @@ use sp_std::{boxed::Box, marker::PhantomData, vec::Vec}; #[cfg(feature = "std")] use sp_trie::recorder::Recorder; use sp_trie::{ - child_delta_trie_root, delta_trie_root, empty_child_trie_root, read_child_trie_hash, - read_child_trie_value, read_trie_value, + child_delta_trie_root, delta_trie_root, empty_child_trie_root, + read_child_trie_first_descedant_value, read_child_trie_hash, read_child_trie_value, + read_trie_first_descedant_value, read_trie_value, trie_types::{TrieDBBuilder, TrieError}, - DBValue, KeySpacedDB, NodeCodec, PrefixedMemoryDB, Trie, TrieCache, TrieDBRawIterator, - TrieRecorder, + DBValue, KeySpacedDB, MerkleValue, NodeCodec, PrefixedMemoryDB, Trie, TrieCache, + TrieDBRawIterator, TrieRecorder, }; #[cfg(feature = "std")] use std::{collections::HashMap, sync::Arc}; @@ -574,6 +575,39 @@ where }) } + /// Get the closest merkle value at given key. + pub fn closest_merkle_value(&self, key: &[u8]) -> Result>> { + let map_e = |e| format!("Trie lookup error: {}", e); + + self.with_recorder_and_cache(None, |recorder, cache| { + read_trie_first_descedant_value::, _>(self, &self.root, key, recorder, cache) + .map_err(map_e) + }) + } + + /// Get the child closest merkle value at given key. 
+ pub fn child_closest_merkle_value( + &self, + child_info: &ChildInfo, + key: &[u8], + ) -> Result>> { + let Some(child_root) = self.child_root(child_info)? else { return Ok(None) }; + + let map_e = |e| format!("Trie lookup error: {}", e); + + self.with_recorder_and_cache(Some(child_root), |recorder, cache| { + read_child_trie_first_descedant_value::, _>( + child_info.keyspace(), + self, + &child_root, + key, + recorder, + cache, + ) + .map_err(map_e) + }) + } + /// Create a raw iterator over the storage. pub fn raw_iter(&self, args: IterArgs) -> Result> { let root = if let Some(child_info) = args.child_info.as_ref() { diff --git a/substrate/primitives/trie/src/lib.rs b/substrate/primitives/trie/src/lib.rs index 94155458569b..1a1ed670454d 100644 --- a/substrate/primitives/trie/src/lib.rs +++ b/substrate/primitives/trie/src/lib.rs @@ -44,7 +44,6 @@ pub use storage_proof::{CompactProof, StorageProof}; /// Trie codec reexport, mainly child trie support /// for trie compact proof. pub use trie_codec::{decode_compact, encode_compact, Error as CompactProofError}; -pub use trie_db::proof::VerifyError; use trie_db::proof::{generate_proof, verify_proof}; /// Various re-exports from the `trie-db` crate. pub use trie_db::{ @@ -53,6 +52,7 @@ pub use trie_db::{ CError, DBValue, Query, Recorder, Trie, TrieCache, TrieConfiguration, TrieDBIterator, TrieDBKeyIterator, TrieDBRawIterator, TrieLayout, TrieMut, TrieRecorder, }; +pub use trie_db::{proof::VerifyError, MerkleValue}; /// The Substrate format implementation of `TrieStream`. pub use trie_stream::TrieStream; @@ -295,6 +295,25 @@ pub fn read_trie_value( + db: &DB, + root: &TrieHash, + key: &[u8], + recorder: Option<&mut dyn TrieRecorder>>, + cache: Option<&mut dyn TrieCache>, +) -> Result>>, Box>> +where + DB: hash_db::HashDBRef, +{ + TrieDBBuilder::::new(db, root) + .with_optional_cache(cache) + .with_optional_recorder(recorder) + .build() + .lookup_first_descendant(key) +} + /// Read a value from the trie with given Query. pub fn read_trie_value_with< L: TrieLayout, @@ -397,6 +416,27 @@ where .get_hash(key) } +/// Read the [`trie_db::MerkleValue`] of the node that is the closest descendant for +/// the provided child key. +pub fn read_child_trie_first_descedant_value( + keyspace: &[u8], + db: &DB, + root: &TrieHash, + key: &[u8], + recorder: Option<&mut dyn TrieRecorder>>, + cache: Option<&mut dyn TrieCache>, +) -> Result>>, Box>> +where + DB: hash_db::HashDBRef, +{ + let db = KeySpacedDB::new(db, keyspace); + TrieDBBuilder::::new(&db, &root) + .with_optional_recorder(recorder) + .with_optional_cache(cache) + .build() + .lookup_first_descendant(key) +} + /// Read a value from the child trie with given query. pub fn read_child_trie_value_with( keyspace: &[u8], From 372929fa256f5839047bb5d569ad523cb4b15094 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Sep 2023 14:49:39 +0200 Subject: [PATCH 13/16] Bump the known_good_semver group with 1 update (#1606) Bumps the known_good_semver group with 1 update: [syn](https://github.com/dtolnay/syn).
Release notes

Sourced from syn's releases.

2.0.36

  • Restore compatibility with --generate-link-to-definition documentation builds (#1514)

2.0.35

  • Make rust-analyzer produce preferred brackets for invocations of Token! macro (#1510, #1512)

2.0.34

  • Documentation improvements

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=syn&package-manager=cargo&previous-version=2.0.33&new-version=2.0.36)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore major version` will close this group update PR and stop Dependabot creating any more for the specific dependency's major version (unless you unignore this specific dependency's major version or upgrade to it yourself) - `@dependabot ignore minor version` will close this group update PR and stop Dependabot creating any more for the specific dependency's minor version (unless you unignore this specific dependency's minor version or upgrade to it yourself) - `@dependabot ignore ` will close this group update PR and stop Dependabot creating any more for the specific dependency (unless you unignore this specific dependency or upgrade to it yourself) - `@dependabot unignore ` will remove all of the ignore conditions of the specified dependency - `@dependabot unignore ` will remove the ignore condition of the specified dependency and ignore conditions
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 100 +++++++++--------- .../parachain-system/proc-macro/Cargo.toml | 2 +- polkadot/node/gum/proc-macro/Cargo.toml | 2 +- polkadot/xcm/procedural/Cargo.toml | 2 +- substrate/client/chain-spec/derive/Cargo.toml | 2 +- .../client/tracing/proc-macro/Cargo.toml | 2 +- .../frame/contracts/proc-macro/Cargo.toml | 2 +- .../solution-type/Cargo.toml | 2 +- .../frame/staking/reward-curve/Cargo.toml | 2 +- substrate/frame/support/procedural/Cargo.toml | 2 +- .../frame/support/procedural/tools/Cargo.toml | 2 +- .../procedural/tools/derive/Cargo.toml | 2 +- .../primitives/api/proc-macro/Cargo.toml | 2 +- .../core/hashing/proc-macro/Cargo.toml | 2 +- substrate/primitives/debug-derive/Cargo.toml | 2 +- .../runtime-interface/proc-macro/Cargo.toml | 2 +- .../primitives/version/proc-macro/Cargo.toml | 2 +- 17 files changed, 66 insertions(+), 66 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ea4cf4a1817e..a5ea33e1ae49 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1138,7 +1138,7 @@ checksum = "0e97ce7de6cf12de5d7226c73f5ba9811622f4db3a5b91b55c53e987e5f91cba" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -1160,7 +1160,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -1177,7 +1177,7 @@ checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -1351,7 +1351,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -2514,7 +2514,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -3574,7 +3574,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -4057,7 +4057,7 @@ checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -4097,7 +4097,7 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -4114,7 +4114,7 @@ checksum = "50c49547d73ba8dcfd4ad7325d64c6d5391ff4224d498fc39a6f3f49825a530d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -4412,7 +4412,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -4474,7 +4474,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.33", + "syn 2.0.36", "termcolor", "toml 0.7.6", "walkdir", @@ -4695,7 +4695,7 @@ checksum = "5e9a1f9f7d83e59740248a6e14ecf93929ade55027844dfcea78beafccc15745" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -4706,7 +4706,7 @@ checksum = "c2ad8cef1d801a4686bfd8919f0b30eac4c8e48968c437a6405ded4fb5272d2b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -4851,7 +4851,7 @@ dependencies = [ "fs-err", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -5207,7 +5207,7 @@ dependencies = [ "quote", "scale-info", "sp-arithmetic", - "syn 2.0.33", + "syn 2.0.36", "trybuild", ] @@ -5359,7 +5359,7 @@ dependencies = [ "proc-macro-warning", "proc-macro2", "quote", - "syn 
2.0.33", + "syn 2.0.36", ] [[package]] @@ -5370,7 +5370,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -5379,7 +5379,7 @@ version = "3.0.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -5602,7 +5602,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -7612,7 +7612,7 @@ dependencies = [ "macro_magic_core", "macro_magic_macros", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -7626,7 +7626,7 @@ dependencies = [ "macro_magic_core_macros", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -7637,7 +7637,7 @@ checksum = "c12469fc165526520dff2807c2975310ab47cf7190a45b99b49a7dc8befab17b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -7648,7 +7648,7 @@ checksum = "b8fb85ec1620619edf2984a7693497d4ec88a9665d8b87e942856884c92dbf2a" dependencies = [ "macro_magic_core", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -9334,7 +9334,7 @@ version = "4.0.0-dev" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -10405,7 +10405,7 @@ dependencies = [ "proc-macro2", "quote", "sp-runtime", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -11272,7 +11272,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -11313,7 +11313,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -13292,7 +13292,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c64d9ba0963cdcea2e1b2230fbae2bab30eb25a174be395c41e764bfb65dd62" dependencies = [ "proc-macro2", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -13374,7 +13374,7 @@ checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -13420,7 +13420,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -13811,7 +13811,7 @@ checksum = "7f7473c2cfcf90008193dd0e3e16599455cb601a9fce322b5bb55de799664925" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -14574,7 +14574,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -15785,7 +15785,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -16139,7 +16139,7 @@ checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -16205,7 +16205,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -16633,7 +16633,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -17033,7 +17033,7 @@ version = "9.0.0" dependencies = [ "quote", "sp-core-hashing", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -17077,7 +17077,7 @@ version = "8.0.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ 
-17308,7 +17308,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -17548,7 +17548,7 @@ dependencies = [ "proc-macro2", "quote", "sp-version", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -18323,9 +18323,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.33" +version = "2.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9caece70c63bfba29ec2fed841a09851b14a235c60010fa4de58089b6c025668" +checksum = "91e02e55d62894af2a08aca894c6577281f76769ba47c94d5756bec8ac6e7373" dependencies = [ "proc-macro2", "quote", @@ -18570,7 +18570,7 @@ checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -18750,7 +18750,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -18931,7 +18931,7 @@ checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -18974,7 +18974,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -19523,7 +19523,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", "wasm-bindgen-shared", ] @@ -19557,7 +19557,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -20693,7 +20693,7 @@ dependencies = [ "Inflector", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] @@ -20812,7 +20812,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.36", ] [[package]] diff --git a/cumulus/pallets/parachain-system/proc-macro/Cargo.toml b/cumulus/pallets/parachain-system/proc-macro/Cargo.toml index 77e298363c85..ee0943bb99ee 100644 --- a/cumulus/pallets/parachain-system/proc-macro/Cargo.toml +++ b/cumulus/pallets/parachain-system/proc-macro/Cargo.toml @@ -9,7 +9,7 @@ description = "Proc macros provided by the parachain-system pallet" proc-macro = true [dependencies] -syn = "2.0.33" +syn = "2.0.36" proc-macro2 = "1.0.64" quote = "1.0.33" proc-macro-crate = "1.3.1" diff --git a/polkadot/node/gum/proc-macro/Cargo.toml b/polkadot/node/gum/proc-macro/Cargo.toml index 0d6cee2ccf0a..be302e46ad90 100644 --- a/polkadot/node/gum/proc-macro/Cargo.toml +++ b/polkadot/node/gum/proc-macro/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -syn = { version = "2.0.33", features = ["full", "extra-traits"] } +syn = { version = "2.0.36", features = ["full", "extra-traits"] } quote = "1.0.28" proc-macro2 = "1.0.56" proc-macro-crate = "1.1.3" diff --git a/polkadot/xcm/procedural/Cargo.toml b/polkadot/xcm/procedural/Cargo.toml index 3e137c42843b..6beaa1d667f0 100644 --- a/polkadot/xcm/procedural/Cargo.toml +++ b/polkadot/xcm/procedural/Cargo.toml @@ -11,5 +11,5 @@ proc-macro = true [dependencies] proc-macro2 = "1.0.56" quote = "1.0.28" -syn = "2.0.33" +syn = "2.0.36" Inflector = "0.11.4" diff --git a/substrate/client/chain-spec/derive/Cargo.toml b/substrate/client/chain-spec/derive/Cargo.toml index ff1ce5141e69..f0ad4c68d1f0 100644 --- 
a/substrate/client/chain-spec/derive/Cargo.toml +++ b/substrate/client/chain-spec/derive/Cargo.toml @@ -18,4 +18,4 @@ proc-macro = true proc-macro-crate = "1.1.3" proc-macro2 = "1.0.56" quote = "1.0.28" -syn = "2.0.33" +syn = "2.0.36" diff --git a/substrate/client/tracing/proc-macro/Cargo.toml b/substrate/client/tracing/proc-macro/Cargo.toml index bf45111de88c..270f34b6d04b 100644 --- a/substrate/client/tracing/proc-macro/Cargo.toml +++ b/substrate/client/tracing/proc-macro/Cargo.toml @@ -18,4 +18,4 @@ proc-macro = true proc-macro-crate = "1.1.3" proc-macro2 = "1.0.56" quote = { version = "1.0.28", features = ["proc-macro"] } -syn = { version = "2.0.33", features = ["proc-macro", "full", "extra-traits", "parsing"] } +syn = { version = "2.0.36", features = ["proc-macro", "full", "extra-traits", "parsing"] } diff --git a/substrate/frame/contracts/proc-macro/Cargo.toml b/substrate/frame/contracts/proc-macro/Cargo.toml index 8779f97b9cde..ccc80a2eba4d 100644 --- a/substrate/frame/contracts/proc-macro/Cargo.toml +++ b/substrate/frame/contracts/proc-macro/Cargo.toml @@ -17,7 +17,7 @@ proc-macro = true [dependencies] proc-macro2 = "1.0.56" quote = "1.0.28" -syn = { version = "2.0.33", features = ["full"] } +syn = { version = "2.0.36", features = ["full"] } [dev-dependencies] diff --git a/substrate/frame/election-provider-support/solution-type/Cargo.toml b/substrate/frame/election-provider-support/solution-type/Cargo.toml index 50381d838696..1b432204470a 100644 --- a/substrate/frame/election-provider-support/solution-type/Cargo.toml +++ b/substrate/frame/election-provider-support/solution-type/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -syn = { version = "2.0.33", features = ["full", "visit"] } +syn = { version = "2.0.36", features = ["full", "visit"] } quote = "1.0.28" proc-macro2 = "1.0.56" proc-macro-crate = "1.1.3" diff --git a/substrate/frame/staking/reward-curve/Cargo.toml b/substrate/frame/staking/reward-curve/Cargo.toml index fc1f1b4b3ee8..7646bbc9a55d 100644 --- a/substrate/frame/staking/reward-curve/Cargo.toml +++ b/substrate/frame/staking/reward-curve/Cargo.toml @@ -18,7 +18,7 @@ proc-macro = true proc-macro-crate = "1.1.3" proc-macro2 = "1.0.56" quote = "1.0.28" -syn = { version = "2.0.33", features = ["full", "visit"] } +syn = { version = "2.0.36", features = ["full", "visit"] } [dev-dependencies] sp-runtime = { path = "../../../primitives/runtime" } diff --git a/substrate/frame/support/procedural/Cargo.toml b/substrate/frame/support/procedural/Cargo.toml index b582457e4b8d..e16068546056 100644 --- a/substrate/frame/support/procedural/Cargo.toml +++ b/substrate/frame/support/procedural/Cargo.toml @@ -21,7 +21,7 @@ cfg-expr = "0.15.5" itertools = "0.10.3" proc-macro2 = "1.0.56" quote = "1.0.28" -syn = { version = "2.0.33", features = ["full"] } +syn = { version = "2.0.36", features = ["full"] } frame-support-procedural-tools = { path = "tools" } proc-macro-warning = { version = "0.4.2", default-features = false } macro_magic = { version = "0.4.2", features = ["proc_support"] } diff --git a/substrate/frame/support/procedural/tools/Cargo.toml b/substrate/frame/support/procedural/tools/Cargo.toml index 211dc3bd66a5..fb0a1b51cbcf 100644 --- a/substrate/frame/support/procedural/tools/Cargo.toml +++ b/substrate/frame/support/procedural/tools/Cargo.toml @@ -15,5 +15,5 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro-crate = "1.1.3" proc-macro2 = "1.0.56" quote = "1.0.28" -syn = { version = "2.0.33", features = ["full", "visit", 
"extra-traits"] } +syn = { version = "2.0.36", features = ["full", "visit", "extra-traits"] } frame-support-procedural-tools-derive = { path = "derive" } diff --git a/substrate/frame/support/procedural/tools/derive/Cargo.toml b/substrate/frame/support/procedural/tools/derive/Cargo.toml index 472e288c3df8..747d3bacd425 100644 --- a/substrate/frame/support/procedural/tools/derive/Cargo.toml +++ b/substrate/frame/support/procedural/tools/derive/Cargo.toml @@ -17,4 +17,4 @@ proc-macro = true [dependencies] proc-macro2 = "1.0.56" quote = { version = "1.0.28", features = ["proc-macro"] } -syn = { version = "2.0.33", features = ["proc-macro", "full", "extra-traits", "parsing"] } +syn = { version = "2.0.36", features = ["proc-macro", "full", "extra-traits", "parsing"] } diff --git a/substrate/primitives/api/proc-macro/Cargo.toml b/substrate/primitives/api/proc-macro/Cargo.toml index d53b9b702e7b..71f1ff95d555 100644 --- a/substrate/primitives/api/proc-macro/Cargo.toml +++ b/substrate/primitives/api/proc-macro/Cargo.toml @@ -17,7 +17,7 @@ proc-macro = true [dependencies] quote = "1.0.28" -syn = { version = "2.0.33", features = ["full", "fold", "extra-traits", "visit"] } +syn = { version = "2.0.36", features = ["full", "fold", "extra-traits", "visit"] } proc-macro2 = "1.0.56" blake2 = { version = "0.10.4", default-features = false } proc-macro-crate = "1.1.3" diff --git a/substrate/primitives/core/hashing/proc-macro/Cargo.toml b/substrate/primitives/core/hashing/proc-macro/Cargo.toml index 5e7cd9e3a7a4..fce09b452e5d 100644 --- a/substrate/primitives/core/hashing/proc-macro/Cargo.toml +++ b/substrate/primitives/core/hashing/proc-macro/Cargo.toml @@ -17,5 +17,5 @@ proc-macro = true [dependencies] quote = "1.0.28" -syn = { version = "2.0.33", features = ["full", "parsing"] } +syn = { version = "2.0.36", features = ["full", "parsing"] } sp-core-hashing = { path = "..", default-features = false} diff --git a/substrate/primitives/debug-derive/Cargo.toml b/substrate/primitives/debug-derive/Cargo.toml index 82f4e8cf6048..689a12505694 100644 --- a/substrate/primitives/debug-derive/Cargo.toml +++ b/substrate/primitives/debug-derive/Cargo.toml @@ -18,7 +18,7 @@ proc-macro = true [dependencies] quote = "1.0.28" -syn = "2.0.33" +syn = "2.0.36" proc-macro2 = "1.0.56" [features] diff --git a/substrate/primitives/runtime-interface/proc-macro/Cargo.toml b/substrate/primitives/runtime-interface/proc-macro/Cargo.toml index 0884efc56d4e..fe06c56d5a15 100644 --- a/substrate/primitives/runtime-interface/proc-macro/Cargo.toml +++ b/substrate/primitives/runtime-interface/proc-macro/Cargo.toml @@ -20,4 +20,4 @@ Inflector = "0.11.4" proc-macro-crate = "1.1.3" proc-macro2 = "1.0.56" quote = "1.0.28" -syn = { version = "2.0.33", features = ["full", "visit", "fold", "extra-traits"] } +syn = { version = "2.0.36", features = ["full", "visit", "fold", "extra-traits"] } diff --git a/substrate/primitives/version/proc-macro/Cargo.toml b/substrate/primitives/version/proc-macro/Cargo.toml index a3478e3e5ca9..3cd1b7a3a76d 100644 --- a/substrate/primitives/version/proc-macro/Cargo.toml +++ b/substrate/primitives/version/proc-macro/Cargo.toml @@ -19,7 +19,7 @@ proc-macro = true codec = { package = "parity-scale-codec", version = "3.6.1", features = [ "derive" ] } proc-macro2 = "1.0.56" quote = "1.0.28" -syn = { version = "2.0.33", features = ["full", "fold", "extra-traits", "visit"] } +syn = { version = "2.0.36", features = ["full", "fold", "extra-traits", "visit"] } [dev-dependencies] sp-version = { path = ".." 
} From 8900d5b23a8e0be100552801207e174c47821ad6 Mon Sep 17 00:00:00 2001 From: Chevdor Date: Mon, 18 Sep 2023 16:08:57 +0200 Subject: [PATCH 14/16] Move ISSUE_TEMPLATE (#1567) This PR moves the `ISSUE_TEMPLATE` to the root and removes the old ones. --- .../ISSUE_TEMPLATE/blank.md | 0 .../ISSUE_TEMPLATE/bug_report.yaml | 5 +- .../ISSUE_TEMPLATE/config.yml | 0 .../ISSUE_TEMPLATE/feature.yaml | 2 +- .../.github/ISSUE_TEMPLATE/release-client.md | 20 ------- .../.github/ISSUE_TEMPLATE/release-runtime.md | 54 ------------------- polkadot/.github/ISSUE_TEMPLATE/bug_report.md | 13 ----- polkadot/.github/ISSUE_TEMPLATE/release.md | 52 ------------------ 8 files changed, 4 insertions(+), 142 deletions(-) rename {cumulus/.github => .github}/ISSUE_TEMPLATE/blank.md (100%) rename substrate/.github/ISSUE_TEMPLATE/bug.yaml => .github/ISSUE_TEMPLATE/bug_report.yaml (92%) rename {substrate/.github => .github}/ISSUE_TEMPLATE/config.yml (100%) rename {substrate/.github => .github}/ISSUE_TEMPLATE/feature.yaml (98%) delete mode 100644 cumulus/.github/ISSUE_TEMPLATE/release-client.md delete mode 100644 cumulus/.github/ISSUE_TEMPLATE/release-runtime.md delete mode 100644 polkadot/.github/ISSUE_TEMPLATE/bug_report.md delete mode 100644 polkadot/.github/ISSUE_TEMPLATE/release.md diff --git a/cumulus/.github/ISSUE_TEMPLATE/blank.md b/.github/ISSUE_TEMPLATE/blank.md similarity index 100% rename from cumulus/.github/ISSUE_TEMPLATE/blank.md rename to .github/ISSUE_TEMPLATE/blank.md diff --git a/substrate/.github/ISSUE_TEMPLATE/bug.yaml b/.github/ISSUE_TEMPLATE/bug_report.yaml similarity index 92% rename from substrate/.github/ISSUE_TEMPLATE/bug.yaml rename to .github/ISSUE_TEMPLATE/bug_report.yaml index ae40df08eca7..f828a5d9d893 100644 --- a/substrate/.github/ISSUE_TEMPLATE/bug.yaml +++ b/.github/ISSUE_TEMPLATE/bug_report.yaml @@ -1,6 +1,7 @@ name: Bug Report description: Let us know about an issue you experienced with this software -# labels: ["some existing label","another one"] +labels: [ I2-bug, I10-unconfirmed ] + body: - type: checkboxes attributes: @@ -20,7 +21,7 @@ body: id: bug attributes: label: Description of bug - # description: What seems to be the problem? + description: What seems to be the problem? # placeholder: Describe the problem. validations: required: true diff --git a/substrate/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml similarity index 100% rename from substrate/.github/ISSUE_TEMPLATE/config.yml rename to .github/ISSUE_TEMPLATE/config.yml diff --git a/substrate/.github/ISSUE_TEMPLATE/feature.yaml b/.github/ISSUE_TEMPLATE/feature.yaml similarity index 98% rename from substrate/.github/ISSUE_TEMPLATE/feature.yaml rename to .github/ISSUE_TEMPLATE/feature.yaml index 6a59522ab4b4..828e8b461ccc 100644 --- a/substrate/.github/ISSUE_TEMPLATE/feature.yaml +++ b/.github/ISSUE_TEMPLATE/feature.yaml @@ -1,6 +1,6 @@ name: Feature Request description: Submit your requests and suggestions to improve! 
-labels: ["J0-enhancement"] +labels: [ I5-enhancement ] body: - type: checkboxes id: existing diff --git a/cumulus/.github/ISSUE_TEMPLATE/release-client.md b/cumulus/.github/ISSUE_TEMPLATE/release-client.md deleted file mode 100644 index bb7f20615767..000000000000 --- a/cumulus/.github/ISSUE_TEMPLATE/release-client.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -name: Release Checklist for Client -about: Release Checklist for Client -title: Release Checklist for Client {{ env.VERSION }} ---- - -# Release Checklist - Client - -### Client Release - -- [ ] build a new `polkadot-parachain` binary and publish it to S3 -- [ ] new `polkadot-parachain` version has [run on the network](../../docs/release.md#burnin) - without issue for at least 12h -- [ ] a draft release has been created in the [Github Releases page](https://github.com/paritytech/cumulus/releases) with the relevant release-notes -- [ ] the [build artifacts](../../docs/release.md#build-artifacts) have been added to the - draft-release. - ---- - -Read more about the [release documentation](../../docs/release.md). diff --git a/cumulus/.github/ISSUE_TEMPLATE/release-runtime.md b/cumulus/.github/ISSUE_TEMPLATE/release-runtime.md deleted file mode 100644 index 0f3543759afd..000000000000 --- a/cumulus/.github/ISSUE_TEMPLATE/release-runtime.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -name: Release Checklist for Runtime -about: Release Checklist for Runtime -title: Release Checklist for Runtime {{ env.VERSION }} ---- - -# Release Checklist - Runtimes - -**All** following checks must be completed before publishing a new release. -The release process is owned and led by @paritytech/release-engineering team. -The checks marked with :crab: are meant to be checked by [a runtime engineer](https://github.com/paritytech/cumulus/issues/1761). - -## Runtimes Release - -### Codebase -These checks should be performed on the codebase. - -- [ ] the [`spec_version`](https://github.com/paritytech/cumulus/blob/master/docs/release.md#spec-version) has been incremented since the - last release for any native runtimes from any existing use on public (non-private/test) networks -- [ ] :crab: previously [completed migrations](https://github.com/paritytech/cumulus/blob/master/docs/release.md#old-migrations-removed) are removed for any public (non-private/test) networks -- [ ] pallet and [extrinsic ordering](https://github.com/paritytech/cumulus/blob/master/docs/release.md#extrinsic-ordering--storage) as well as `SignedExtension`s have stayed - the same. Bump `transaction_version` otherwise -- [ ] the [benchmarks](https://github.com/paritytech/ci_cd/wiki/Benchmarks:-cumulus) ran -- [ ] the weights have been updated for any modified runtime logic -- [ ] :crab: the new weights are sane, there are no significant (>50%) drops or rises with no reason -- [ ] :crab: XCM config is compatible with the configurations and versions of relevant interlocutors, like the Relay Chain. - -### On the release branch - -The following checks can be performed after we have forked off to the release-candidate branch or started an additional release candidate branch (rc-2, rc-3, etc) - -- [ ] Verify [new migrations](https://github.com/paritytech/cumulus/blob/master/docs/release.md#new-migrations) complete successfully, and the - runtime state is correctly updated for any public (non-private/test) - networks -- [ ] Run [integration tests](https://github.com/paritytech/cumulus/blob/master/docs/release.md#integration-tests), and make sure they pass. 
-- [ ] Push runtime upgrade to Asset Hub Westend and verify network stability -- [ ] Push runtime upgrade to Collectives and verify network stability -- [ ] Push runtime upgrade to Bridge-Hub-Kusama and verify network stability - - -### Github - -- [ ] Check that a draft release has been created at the [Github Releases page](https://github.com/paritytech/cumulus/releases) with relevant [release - notes](https://github.com/paritytech/cumulus/blob/master/docs/release.md#release-notes) -- [ ] Check that [build artifacts](https://github.com/paritytech/cumulus/blob/master/docs/release.md#build-artifacts) have been added to the - draft-release. - -# Post release - -- [ ] :crab: all commits (runtime version bumps, fixes) on this release branch have been merged back to master. - ---- - -Read more about the [release documentation](https://github.com/paritytech/cumulus/blob/master/docs/release.md). diff --git a/polkadot/.github/ISSUE_TEMPLATE/bug_report.md b/polkadot/.github/ISSUE_TEMPLATE/bug_report.md deleted file mode 100644 index c2214ab7d932..000000000000 --- a/polkadot/.github/ISSUE_TEMPLATE/bug_report.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -name: Bug report -about: Create a report to help us improve -title: '' -labels: '' -assignees: '' - ---- - -- It would help if you submit info about the system you are running, e.g.: operating system, kernel version, amount of available memory and swap, etc. -- Logs could be very helpful. If possible, submit the whole log. Please format it as ```code blocks```. -- Describe the role your node plays, e.g. validator, full node or light client. -- Any command-line options were passed? diff --git a/polkadot/.github/ISSUE_TEMPLATE/release.md b/polkadot/.github/ISSUE_TEMPLATE/release.md deleted file mode 100644 index 37b422a9b3ec..000000000000 --- a/polkadot/.github/ISSUE_TEMPLATE/release.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -name: Release issue template -about: Tracking issue for new releases -title: Polkadot {{ env.VERSION }} Release checklist ---- -# Release Checklist - -This is the release checklist for Polkadot {{ env.VERSION }}. **All** following -checks should be completed before publishing a new release of the -Polkadot/Kusama/Westend/Rococo runtime or client. The current release candidate can be -checked out with `git checkout release-{{ env.VERSION }}` - -### Runtime Releases - -These checks should be performed on the codebase prior to forking to a release- -candidate branch. - -- [ ] Verify [`spec_version`](https://github.com/paritytech/polkadot/blob/master/doc/release-checklist.md#spec-version) has been incremented since the - last release for any native runtimes from any existing use on public - (non-private) networks. If the runtime was published (release or pre-release), either - the `spec_version` or `impl` must be bumped. -- [ ] Verify previously [completed migrations](https://github.com/paritytech/polkadot/blob/master/doc/release-checklist.md#old-migrations-removed) are - removed for any public (non-private/test) networks. -- [ ] Verify pallet and [extrinsic ordering](https://github.com/paritytech/polkadot/blob/master/doc/release-checklist.md#extrinsic-ordering) has stayed - the same. Bump `transaction_version` if not. -- [ ] Verify new extrinsics have been correctly whitelisted/blacklisted for - [proxy filters](https://github.com/paritytech/polkadot/blob/master/doc/release-checklist.md#proxy-filtering). 
-- [ ] Verify [benchmarks](https://github.com/paritytech/polkadot/blob/master/doc/release-checklist.md#benchmarks) have been updated for any modified - runtime logic. - -The following checks can be performed after we have forked off to the release- -candidate branch or started an additional release candidate branch (rc-2, rc-3, etc) - -- [ ] Verify [new migrations](https://github.com/paritytech/polkadot/blob/master/doc/release-checklist.md#new-migrations) complete successfully, and the - runtime state is correctly updated for any public (non-private/test) - networks. -- [ ] Verify [Polkadot JS API](https://github.com/paritytech/polkadot/blob/master/doc/release-checklist.md#polkadot-js) are up to date with the latest - runtime changes. -- [ ] Check with the Signer's team to make sure metadata update QR are lined up -- [ ] Push runtime upgrade to Westend and verify network stability. - -### All Releases - -- [ ] Check that the new client versions have [run on the network](https://github.com/paritytech/polkadot/blob/master/doc/release-checklist.md#burn-in) - without issue for 12+ hours on >75% of our validator nodes. -- [ ] Check that a draft release has been created at - https://github.com/paritytech/polkadot/releases with relevant [release - notes](https://github.com/paritytech/polkadot/blob/master/doc/release-checklist.md#release-notes) -- [ ] Check that [build artifacts](https://github.com/paritytech/polkadot/blob/master/doc/release-checklist.md#build-artifacts) have been added to the - draft-release -- [ ] Check that all items listed in the [milestone](https://github.com/paritytech/polkadot/milestones) are included in the release. -- [ ] Ensure that no `freenotes` were added into the release branch after the latest generated RC From e6f5e23b0f894954c0f04ba379966f2466f51de6 Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Mon, 18 Sep 2023 16:35:40 +0200 Subject: [PATCH 15/16] [ci] Publish implementers guide (#1615) PR adds a job that publishes implementers guide cc https://github.com/paritytech/polkadot-sdk/issues/1614 cc https://github.com/paritytech/ci_cd/issues/879 --- .gitlab/pipeline/build.yml | 2 +- .gitlab/pipeline/publish.yml | 12 ++++++++++-- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/.gitlab/pipeline/build.yml b/.gitlab/pipeline/build.yml index 3085d2230063..5b53798c403d 100644 --- a/.gitlab/pipeline/build.yml +++ b/.gitlab/pipeline/build.yml @@ -127,7 +127,7 @@ build-implementers-guide: - .kubernetes-env - .common-refs - .run-immediately - # - .collect-artifacts + - .collect-artifacts # git depth is set on purpose: https://github.com/paritytech/polkadot/issues/6284 variables: GIT_STRATEGY: clone diff --git a/.gitlab/pipeline/publish.yml b/.gitlab/pipeline/publish.yml index 9e24b8606a4d..a03d407c0409 100644 --- a/.gitlab/pipeline/publish.yml +++ b/.gitlab/pipeline/publish.yml @@ -16,6 +16,8 @@ publish-rustdoc: needs: - job: build-rustdoc artifacts: true + - job: build-implementers-guide + artifacts: true script: # If $CI_COMMIT_REF_NAME doesn't match one of $RUSTDOCS_DEPLOY_REFS space-separated values, we # exit immediately. 
@@ -34,15 +36,21 @@ publish-rustdoc: - git fetch origin gh-pages # Save README and docs - cp -r ./crate-docs/ /tmp/doc/ + - cp -r ./artifacts/book/ /tmp/ - cp README.md /tmp/doc/ # we don't need to commit changes because we copy docs to /tmp - git checkout gh-pages --force + # Enable if docs needed for other refs # Install `index-tpl-crud` and generate index.html based on RUSTDOCS_DEPLOY_REFS - - which index-tpl-crud &> /dev/null || yarn global add @substrate/index-tpl-crud - - index-tpl-crud upsert ./index.html ${CI_COMMIT_REF_NAME} + # - which index-tpl-crud &> /dev/null || yarn global add @substrate/index-tpl-crud + # - index-tpl-crud upsert ./index.html ${CI_COMMIT_REF_NAME} # Ensure the destination dir doesn't exist. - rm -rf ${CI_COMMIT_REF_NAME} + - rm -rf book/ - mv -f /tmp/doc ${CI_COMMIT_REF_NAME} + # dir for implementors guide + - mkdir -p book + - mv /tmp/book/html/* book/ # Upload files - git add --all # `git commit` has an exit code of > 0 if there is nothing to commit. From a181ced46b6924d6179288e5142e81fdd0927d30 Mon Sep 17 00:00:00 2001 From: georgepisaltu <52418509+georgepisaltu@users.noreply.github.com> Date: Mon, 18 Sep 2023 18:17:14 +0300 Subject: [PATCH 16/16] Replace free for all collation in `cumulus` runtimes (#1251) Partially fixes #103 This PR removes instances of "free for all" collation in the `glutton`, `shell`, and `seedling` runtimes and replaces them with Aura instances. Aura is configured without a session manager, so the initial authority set cannot be changed later on. --------- Signed-off-by: georgepisaltu --- Cargo.lock | 14 ++++ .../glutton/glutton-kusama/Cargo.toml | 16 ++++ .../glutton/glutton-kusama/src/lib.rs | 80 ++++++++++++++++--- .../glutton/glutton-kusama/src/weights/mod.rs | 1 + .../src/weights/pallet_timestamp.rs | 75 +++++++++++++++++ .../runtimes/starters/seedling/Cargo.toml | 14 +++- .../runtimes/starters/seedling/src/lib.rs | 76 +++++++++++++++--- .../runtimes/starters/shell/Cargo.toml | 13 +++ .../runtimes/starters/shell/src/lib.rs | 78 +++++++++++++++--- .../src/chain_spec/glutton.rs | 32 +++++++- .../src/chain_spec/seedling.rs | 8 +- .../src/chain_spec/shell.rs | 14 +++- 12 files changed, 378 insertions(+), 43 deletions(-) create mode 100644 cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/pallet_timestamp.rs diff --git a/Cargo.lock b/Cargo.lock index a5ea33e1ae49..670d9c9e771c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5788,9 +5788,11 @@ dependencies = [ name = "glutton-runtime" version = "1.0.0" dependencies = [ + "cumulus-pallet-aura-ext", "cumulus-pallet-parachain-system", "cumulus-pallet-xcm", "cumulus-primitives-core", + "cumulus-primitives-timestamp", "frame-benchmarking", "frame-executive", "frame-support", @@ -5798,14 +5800,17 @@ dependencies = [ "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", + "pallet-aura", "pallet-glutton", "pallet-sudo", + "pallet-timestamp", "parachain-info", "parachains-common", "parity-scale-codec", "scale-info", "sp-api", "sp-block-builder", + "sp-consensus-aura", "sp-core", "sp-inherents", "sp-offchain", @@ -16058,20 +16063,25 @@ dependencies = [ name = "seedling-runtime" version = "0.1.0" dependencies = [ + "cumulus-pallet-aura-ext", "cumulus-pallet-parachain-system", "cumulus-pallet-solo-to-para", "cumulus-primitives-core", + "cumulus-primitives-timestamp", "frame-executive", "frame-support", "frame-system", + "pallet-aura", "pallet-balances", "pallet-sudo", + "pallet-timestamp", "parachain-info", "parachains-common", "parity-scale-codec", 
"scale-info", "sp-api", "sp-block-builder", + "sp-consensus-aura", "sp-core", "sp-inherents", "sp-offchain", @@ -16302,6 +16312,7 @@ dependencies = [ name = "shell-runtime" version = "0.1.0" dependencies = [ + "cumulus-pallet-aura-ext", "cumulus-pallet-parachain-system", "cumulus-pallet-xcm", "cumulus-primitives-core", @@ -16309,12 +16320,15 @@ dependencies = [ "frame-support", "frame-system", "frame-try-runtime", + "pallet-aura", + "pallet-timestamp", "parachain-info", "parachains-common", "parity-scale-codec", "scale-info", "sp-api", "sp-block-builder", + "sp-consensus-aura", "sp-core", "sp-inherents", "sp-offchain", diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml b/cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml index 2e2975ab87b8..0ffe59b927f9 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml +++ b/cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml @@ -16,10 +16,13 @@ frame-system = { path = "../../../../../substrate/frame/system", default-feature frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false} frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true} frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true} +pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} pallet-glutton = { path = "../../../../../substrate/frame/glutton", default-features = false, optional = true} pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false, optional = true} +pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} +sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} @@ -36,9 +39,11 @@ xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} # Cumulus +cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } +cumulus-primitives-timestamp = { path = "../../../../primitives/timestamp", default-features = false } parachain-info = { path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } @@ -55,6 +60,7 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "pallet-glutton/runtime-benchmarks", "pallet-sudo?/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", 
"parachains-common/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", @@ -62,9 +68,11 @@ runtime-benchmarks = [ ] std = [ "codec/std", + "cumulus-pallet-aura-ext/std", "cumulus-pallet-parachain-system/std", "cumulus-pallet-xcm/std", "cumulus-primitives-core/std", + "cumulus-primitives-timestamp/std", "frame-benchmarking?/std", "frame-executive/std", "frame-support/std", @@ -72,13 +80,16 @@ std = [ "frame-system-rpc-runtime-api/std", "frame-system/std", "frame-try-runtime?/std", + "pallet-aura/std", "pallet-glutton/std", "pallet-sudo/std", + "pallet-timestamp/std", "parachain-info/std", "parachains-common/std", "scale-info/std", "sp-api/std", "sp-block-builder/std", + "sp-consensus-aura/std", "sp-core/std", "sp-inherents/std", "sp-offchain/std", @@ -93,14 +104,19 @@ std = [ "xcm/std", ] try-runtime = [ + "cumulus-pallet-aura-ext/try-runtime", "cumulus-pallet-parachain-system/try-runtime", "cumulus-pallet-xcm/try-runtime", "frame-executive/try-runtime", "frame-support/try-runtime", "frame-system/try-runtime", "frame-try-runtime/try-runtime", + "pallet-aura/try-runtime", "pallet-glutton/try-runtime", "pallet-sudo/try-runtime", + "pallet-timestamp/try-runtime", "parachain-info/try-runtime", "sp-runtime/try-runtime", ] + +experimental = [ "pallet-aura/experimental" ] diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs index dde8f747d463..41cb0fceebb5 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs @@ -46,11 +46,12 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); pub mod weights; pub mod xcm_config; -use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; +use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; use sp_api::impl_runtime_apis; -use sp_core::OpaqueMetadata; +pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; +use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ - create_runtime_str, generic, + create_runtime_str, generic, impl_opaque_keys, traits::{AccountIdLookup, BlakeTwo256, Block as BlockT}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, @@ -64,24 +65,37 @@ pub use frame_support::{ construct_runtime, dispatch::DispatchClass, parameter_types, - traits::{Everything, IsInVec, Randomness}, + traits::{ + ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Everything, IsInVec, Randomness, + }, weights::{ constants::{ BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_REF_TIME_PER_SECOND, }, IdentityFee, Weight, }, - StorageValue, + PalletId, StorageValue, }; use frame_system::{ limits::{BlockLength, BlockWeights}, EnsureRoot, }; -use parachains_common::{AccountId, Signature}; +use parachains_common::{ + kusama::consensus::{ + BLOCK_PROCESSING_VELOCITY, RELAY_CHAIN_SLOT_DURATION_MILLIS, UNINCLUDED_SEGMENT_CAPACITY, + }, + AccountId, Signature, SLOT_DURATION, +}; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; pub use sp_runtime::{Perbill, Permill}; +impl_opaque_keys! 
{ + pub struct SessionKeys { + pub aura: Aura, + } +} + #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("glutton"), @@ -178,12 +192,35 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ReservedDmpWeight = ReservedDmpWeight; type XcmpMessageHandler = (); type ReservedXcmpWeight = (); - type CheckAssociatedRelayNumber = RelayNumberStrictlyIncreases; - type ConsensusHook = cumulus_pallet_parachain_system::consensus_hook::ExpectParentIncluded; + type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; + type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< + Runtime, + RELAY_CHAIN_SLOT_DURATION_MILLIS, + BLOCK_PROCESSING_VELOCITY, + UNINCLUDED_SEGMENT_CAPACITY, + >; } impl parachain_info::Config for Runtime {} +impl cumulus_pallet_aura_ext::Config for Runtime {} + +impl pallet_timestamp::Config for Runtime { + type Moment = u64; + type OnTimestampSet = Aura; + type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>; + type WeightInfo = weights::pallet_timestamp::WeightInfo; +} + +impl pallet_aura::Config for Runtime { + type AuthorityId = AuraId; + type DisabledValidators = (); + type MaxAuthorities = ConstU32<100_000>; + type AllowMultipleBlocksPerSlot = ConstBool; + #[cfg(feature = "experimental")] + type SlotDuration = pallet_aura::MinimumPeriodTimesTwo; +} + impl pallet_glutton::Config for Runtime { type RuntimeEvent = RuntimeEvent; type WeightInfo = weights::pallet_glutton::WeightInfo; @@ -204,6 +241,7 @@ construct_runtime! { Pallet, Call, Config, Storage, Inherent, Event, ValidateUnsigned, } = 1, ParachainInfo: parachain_info::{Pallet, Storage, Config} = 2, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent} = 3, // DMP handler. CumulusXcm: cumulus_pallet_xcm::{Pallet, Call, Storage, Event, Origin} = 10, @@ -211,6 +249,10 @@ construct_runtime! { // The main stage. Glutton: pallet_glutton::{Pallet, Call, Storage, Event, Config} = 20, + // Collator support + Aura: pallet_aura::{Pallet, Storage, Config} = 30, + AuraExt: cumulus_pallet_aura_ext::{Pallet, Storage, Config} = 31, + // Sudo. Sudo: pallet_sudo::{Pallet, Call, Storage, Event, Config} = 255, } @@ -295,6 +337,16 @@ impl_runtime_apis! { } } + impl sp_consensus_aura::AuraApi for Runtime { + fn slot_duration() -> sp_consensus_aura::SlotDuration { + sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) + } + + fn authorities() -> Vec { + Aura::authorities().into_inner() + } + } + impl sp_block_builder::BlockBuilder for Runtime { fn apply_extrinsic( extrinsic: ::Extrinsic, @@ -332,12 +384,14 @@ impl_runtime_apis! { } impl sp_session::SessionKeys for Runtime { - fn decode_session_keys(_: Vec) -> Option, sp_core::crypto::KeyTypeId)>> { - Some(Vec::new()) + fn generate_session_keys(seed: Option>) -> Vec { + SessionKeys::generate(seed) } - fn generate_session_keys(_: Option>) -> Vec { - Vec::new() + fn decode_session_keys( + encoded: Vec, + ) -> Option, KeyTypeId)>> { + SessionKeys::decode_into_raw_public_keys(&encoded) } } @@ -402,5 +456,5 @@ impl_runtime_apis! { cumulus_pallet_parachain_system::register_validate_block! 
{ Runtime = Runtime, - BlockExecutor = Executive, + BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, } diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/mod.rs b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/mod.rs index 990558538acf..955010505d31 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/mod.rs @@ -15,3 +15,4 @@ // along with Cumulus. If not, see . pub mod pallet_glutton; +pub mod pallet_timestamp; diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/pallet_timestamp.rs b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/pallet_timestamp.rs new file mode 100644 index 000000000000..8edae065f1b9 --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/pallet_timestamp.rs @@ -0,0 +1,75 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_timestamp` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=asset-hub-kusama-dev +// --wasm-execution=compiled +// --pallet=pallet_timestamp +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_timestamp`. +pub struct WeightInfo(PhantomData); +impl pallet_timestamp::WeightInfo for WeightInfo { + /// Storage: `Timestamp::Now` (r:1 w:1) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Aura::CurrentSlot` (r:1 w:0) + /// Proof: `Aura::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + fn set() -> Weight { + // Proof Size summary in bytes: + // Measured: `86` + // Estimated: `1493` + // Minimum execution time: 9_313_000 picoseconds. 
+ Weight::from_parts(9_775_000, 0) + .saturating_add(Weight::from_parts(0, 1493)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn on_finalize() -> Weight { + // Proof Size summary in bytes: + // Measured: `57` + // Estimated: `0` + // Minimum execution time: 3_322_000 picoseconds. + Weight::from_parts(3_577_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } +} diff --git a/cumulus/parachains/runtimes/starters/seedling/Cargo.toml b/cumulus/parachains/runtimes/starters/seedling/Cargo.toml index 2cd09d3a9eb0..1b68b720d977 100644 --- a/cumulus/parachains/runtimes/starters/seedling/Cargo.toml +++ b/cumulus/parachains/runtimes/starters/seedling/Cargo.toml @@ -12,10 +12,13 @@ scale-info = { version = "2.9.0", default-features = false, features = ["derive" frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} frame-support = { path = "../../../../../substrate/frame/support", default-features = false} frame-system = { path = "../../../../../substrate/frame/system", default-features = false} +pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false} +pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} +sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} @@ -26,11 +29,13 @@ sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction- sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} # Cumulus +cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } cumulus-pallet-solo-to-para = { path = "../../../../pallets/solo-to-para", default-features = false } +cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } +cumulus-primitives-timestamp = { path = "../../../../primitives/timestamp", default-features = false } parachain-info = { path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } [build-dependencies] substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } @@ -39,19 +44,24 @@ substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", default = [ "std" ] std = [ "codec/std", + "cumulus-pallet-aura-ext/std", "cumulus-pallet-parachain-system/std", "cumulus-pallet-solo-to-para/std", "cumulus-primitives-core/std", + "cumulus-primitives-timestamp/std", "frame-executive/std", 
"frame-support/std", "frame-system/std", + "pallet-aura/std", "pallet-balances/std", "pallet-sudo/std", + "pallet-timestamp/std", "parachain-info/std", "parachains-common/std", "scale-info/std", "sp-api/std", "sp-block-builder/std", + "sp-consensus-aura/std", "sp-core/std", "sp-inherents/std", "sp-offchain/std", @@ -62,3 +72,5 @@ std = [ "sp-version/std", "substrate-wasm-builder", ] + +experimental = [ "pallet-aura/experimental" ] diff --git a/cumulus/parachains/runtimes/starters/seedling/src/lib.rs b/cumulus/parachains/runtimes/starters/seedling/src/lib.rs index 5f6733faf706..34e82737f82b 100644 --- a/cumulus/parachains/runtimes/starters/seedling/src/lib.rs +++ b/cumulus/parachains/runtimes/starters/seedling/src/lib.rs @@ -27,11 +27,12 @@ #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); -use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; +use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; use sp_api::impl_runtime_apis; -use sp_core::OpaqueMetadata; +pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; +use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ - create_runtime_str, generic, + create_runtime_str, generic, impl_opaque_keys, traits::{AccountIdLookup, BlakeTwo256, Block as BlockT}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, @@ -46,7 +47,7 @@ pub use frame_support::{ construct_runtime, dispatch::DispatchClass, parameter_types, - traits::{IsInVec, Randomness}, + traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, IsInVec, Randomness}, weights::{ constants::{ BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_REF_TIME_PER_SECOND, @@ -61,6 +62,12 @@ use parachains_common::{AccountId, Signature}; pub use sp_runtime::BuildStorage; pub use sp_runtime::{Perbill, Permill}; +impl_opaque_keys! { + pub struct SessionKeys { + pub aura: Aura, + } +} + /// This runtime version. #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { @@ -80,6 +87,15 @@ pub fn native_version() -> NativeVersion { NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } } +/// Maximum number of blocks simultaneously accepted by the Runtime, not yet included +/// into the relay chain. +const UNINCLUDED_SEGMENT_CAPACITY: u32 = 1; +/// How many parachain blocks are processed by the relay chain per parent. Limits the +/// number of blocks authored per slot. +const BLOCK_PROCESSING_VELOCITY: u32 = 1; +/// Relay chain slot duration, in milliseconds. +const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; + /// We assume that ~10% of the block weight is consumed by `on_initialize` handlers. /// This is used to limit the maximal weight of a single extrinsic. 
const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); @@ -174,23 +190,49 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ReservedDmpWeight = (); type XcmpMessageHandler = (); type ReservedXcmpWeight = (); - type CheckAssociatedRelayNumber = RelayNumberStrictlyIncreases; - type ConsensusHook = cumulus_pallet_parachain_system::consensus_hook::ExpectParentIncluded; + type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; + type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< + Runtime, + RELAY_CHAIN_SLOT_DURATION_MILLIS, + BLOCK_PROCESSING_VELOCITY, + UNINCLUDED_SEGMENT_CAPACITY, + >; } impl parachain_info::Config for Runtime {} +impl cumulus_pallet_aura_ext::Config for Runtime {} + +impl pallet_aura::Config for Runtime { + type AuthorityId = AuraId; + type DisabledValidators = (); + type MaxAuthorities = ConstU32<100_000>; + type AllowMultipleBlocksPerSlot = ConstBool; + #[cfg(feature = "experimental")] + type SlotDuration = pallet_aura::MinimumPeriodTimesTwo; +} + +impl pallet_timestamp::Config for Runtime { + type Moment = u64; + type OnTimestampSet = Aura; + type MinimumPeriod = ConstU64<0>; + type WeightInfo = (); +} + construct_runtime! { pub enum Runtime { System: frame_system::{Pallet, Call, Storage, Config, Event}, Sudo: pallet_sudo::{Pallet, Call, Storage, Config, Event}, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, ParachainSystem: cumulus_pallet_parachain_system::{ Pallet, Call, Config, Storage, Inherent, Event, ValidateUnsigned, }, ParachainInfo: parachain_info::{Pallet, Storage, Config}, SoloToPara: cumulus_pallet_solo_to_para::{Pallet, Call, Storage, Event}, + Aura: pallet_aura::{Pallet, Storage, Config}, + AuraExt: cumulus_pallet_aura_ext::{Pallet, Storage, Config}, } } @@ -233,6 +275,16 @@ pub type Executive = frame_executive::Executive< >; impl_runtime_apis! { + impl sp_consensus_aura::AuraApi for Runtime { + fn slot_duration() -> sp_consensus_aura::SlotDuration { + sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) + } + + fn authorities() -> Vec { + Aura::authorities().into_inner() + } + } + impl sp_api::Core for Runtime { fn version() -> RuntimeVersion { VERSION @@ -298,12 +350,14 @@ impl_runtime_apis! { } impl sp_session::SessionKeys for Runtime { - fn decode_session_keys(_: Vec) -> Option, sp_core::crypto::KeyTypeId)>> { - Some(Vec::new()) + fn generate_session_keys(seed: Option>) -> Vec { + SessionKeys::generate(seed) } - fn generate_session_keys(_: Option>) -> Vec { - Vec::new() + fn decode_session_keys( + encoded: Vec, + ) -> Option, KeyTypeId)>> { + SessionKeys::decode_into_raw_public_keys(&encoded) } } @@ -316,5 +370,5 @@ impl_runtime_apis! { cumulus_pallet_parachain_system::register_validate_block! 
{ Runtime = Runtime, - BlockExecutor = Executive, + BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, } diff --git a/cumulus/parachains/runtimes/starters/shell/Cargo.toml b/cumulus/parachains/runtimes/starters/shell/Cargo.toml index 6f9046057102..46cef8d4ae08 100644 --- a/cumulus/parachains/runtimes/starters/shell/Cargo.toml +++ b/cumulus/parachains/runtimes/starters/shell/Cargo.toml @@ -13,8 +13,11 @@ frame-executive = { path = "../../../../../substrate/frame/executive", default-f frame-support = { path = "../../../../../substrate/frame/support", default-features = false} frame-system = { path = "../../../../../substrate/frame/system", default-features = false} frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } +pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} +pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} +sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} @@ -30,6 +33,7 @@ xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} # Cumulus +cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } @@ -43,6 +47,7 @@ substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", default = [ "std" ] std = [ "codec/std", + "cumulus-pallet-aura-ext/std", "cumulus-pallet-parachain-system/std", "cumulus-pallet-xcm/std", "cumulus-primitives-core/std", @@ -50,11 +55,14 @@ std = [ "frame-support/std", "frame-system/std", "frame-try-runtime?/std", + "pallet-aura/std", + "pallet-timestamp/std", "parachain-info/std", "parachains-common/std", "scale-info/std", "sp-api/std", "sp-block-builder/std", + "sp-consensus-aura/std", "sp-core/std", "sp-inherents/std", "sp-offchain/std", @@ -69,12 +77,17 @@ std = [ "xcm/std", ] try-runtime = [ + "cumulus-pallet-aura-ext/try-runtime", "cumulus-pallet-parachain-system/try-runtime", "cumulus-pallet-xcm/try-runtime", "frame-executive/try-runtime", "frame-support/try-runtime", "frame-system/try-runtime", "frame-try-runtime/try-runtime", + "pallet-aura/try-runtime", + "pallet-timestamp/try-runtime", "parachain-info/try-runtime", "sp-runtime/try-runtime", ] + +experimental = [ "pallet-aura/experimental" ] diff --git a/cumulus/parachains/runtimes/starters/shell/src/lib.rs b/cumulus/parachains/runtimes/starters/shell/src/lib.rs index ef914a246efc..477933b5c8d2 100644 --- a/cumulus/parachains/runtimes/starters/shell/src/lib.rs +++ 
b/cumulus/parachains/runtimes/starters/shell/src/lib.rs @@ -32,13 +32,14 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); pub mod xcm_config; use codec::{Decode, Encode}; -use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; +use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; use frame_support::unsigned::TransactionValidityError; use scale_info::TypeInfo; use sp_api::impl_runtime_apis; -use sp_core::OpaqueMetadata; +pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; +use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ - create_runtime_str, generic, + create_runtime_str, generic, impl_opaque_keys, traits::{AccountIdLookup, BlakeTwo256, Block as BlockT, DispatchInfoOf}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, @@ -53,7 +54,7 @@ pub use frame_support::{ construct_runtime, dispatch::DispatchClass, parameter_types, - traits::{Everything, IsInVec, Randomness}, + traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, IsInVec, Randomness}, weights::{ constants::{ BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_REF_TIME_PER_SECOND, @@ -68,6 +69,12 @@ use parachains_common::{AccountId, Signature}; pub use sp_runtime::BuildStorage; pub use sp_runtime::{Perbill, Permill}; +impl_opaque_keys! { + pub struct SessionKeys { + pub aura: Aura, + } +} + /// This runtime version. #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { @@ -87,6 +94,15 @@ pub fn native_version() -> NativeVersion { NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } } +/// Maximum number of blocks simultaneously accepted by the Runtime, not yet included +/// into the relay chain. +const UNINCLUDED_SEGMENT_CAPACITY: u32 = 1; +/// How many parachain blocks are processed by the relay chain per parent. Limits the +/// number of blocks authored per slot. +const BLOCK_PROCESSING_VELOCITY: u32 = 1; +/// Relay chain slot duration, in milliseconds. +const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; + /// We assume that ~10% of the block weight is consumed by `on_initialize` handlers. /// This is used to limit the maximal weight of a single extrinsic. const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); @@ -177,16 +193,41 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ReservedDmpWeight = ReservedDmpWeight; type XcmpMessageHandler = (); type ReservedXcmpWeight = (); - type CheckAssociatedRelayNumber = RelayNumberStrictlyIncreases; - type ConsensusHook = cumulus_pallet_parachain_system::consensus_hook::ExpectParentIncluded; + type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; + type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< + Runtime, + RELAY_CHAIN_SLOT_DURATION_MILLIS, + BLOCK_PROCESSING_VELOCITY, + UNINCLUDED_SEGMENT_CAPACITY, + >; } impl parachain_info::Config for Runtime {} +impl cumulus_pallet_aura_ext::Config for Runtime {} + +impl pallet_aura::Config for Runtime { + type AuthorityId = AuraId; + type DisabledValidators = (); + type MaxAuthorities = ConstU32<100_000>; + type AllowMultipleBlocksPerSlot = ConstBool; + #[cfg(feature = "experimental")] + type SlotDuration = pallet_aura::MinimumPeriodTimesTwo; +} + +impl pallet_timestamp::Config for Runtime { + type Moment = u64; + type OnTimestampSet = Aura; + type MinimumPeriod = ConstU64<0>; + type WeightInfo = (); +} + construct_runtime! 
{ pub enum Runtime { System: frame_system::{Pallet, Call, Storage, Config, Event}, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, + ParachainSystem: cumulus_pallet_parachain_system::{ Pallet, Call, Config, Storage, Inherent, Event, ValidateUnsigned, }, @@ -194,6 +235,9 @@ construct_runtime! { // DMP handler. CumulusXcm: cumulus_pallet_xcm::{Pallet, Call, Storage, Event, Origin}, + + Aura: pallet_aura::{Pallet, Storage, Config}, + AuraExt: cumulus_pallet_aura_ext::{Pallet, Storage, Config}, } } @@ -263,6 +307,16 @@ pub type Executive = frame_executive::Executive< >; impl_runtime_apis! { + impl sp_consensus_aura::AuraApi for Runtime { + fn slot_duration() -> sp_consensus_aura::SlotDuration { + sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) + } + + fn authorities() -> Vec { + Aura::authorities().into_inner() + } + } + impl sp_api::Core for Runtime { fn version() -> RuntimeVersion { VERSION @@ -328,12 +382,14 @@ impl_runtime_apis! { } impl sp_session::SessionKeys for Runtime { - fn decode_session_keys(_: Vec) -> Option, sp_core::crypto::KeyTypeId)>> { - Some(Vec::new()) + fn generate_session_keys(seed: Option>) -> Vec { + SessionKeys::generate(seed) } - fn generate_session_keys(_: Option>) -> Vec { - Vec::new() + fn decode_session_keys( + encoded: Vec, + ) -> Option, KeyTypeId)>> { + SessionKeys::decode_into_raw_public_keys(&encoded) } } @@ -346,5 +402,5 @@ impl_runtime_apis! { cumulus_pallet_parachain_system::register_validate_block! { Runtime = Runtime, - BlockExecutor = Executive, + BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, } diff --git a/cumulus/polkadot-parachain/src/chain_spec/glutton.rs b/cumulus/polkadot-parachain/src/chain_spec/glutton.rs index acd5b5bfbe13..881fae398827 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/glutton.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/glutton.rs @@ -16,9 +16,12 @@ use crate::chain_spec::{get_account_id_from_seed, Extensions}; use cumulus_primitives_core::ParaId; +use parachains_common::AuraId; use sc_service::ChainType; use sp_core::sr25519; +use super::get_collator_keys_from_seed; + /// Specialized `ChainSpec` for the Glutton parachain runtime. 
pub type GluttonChainSpec = sc_service::GenericChainSpec; @@ -30,7 +33,7 @@ pub fn glutton_development_config(para_id: ParaId) -> GluttonChainSpec { // ID "glutton_dev", ChainType::Local, - move || glutton_genesis(para_id), + move || glutton_genesis(para_id, vec![get_collator_keys_from_seed::("Alice")]), Vec::new(), None, None, @@ -47,7 +50,15 @@ pub fn glutton_local_config(para_id: ParaId) -> GluttonChainSpec { // ID "glutton_local", ChainType::Local, - move || glutton_genesis(para_id), + move || { + glutton_genesis( + para_id, + vec![ + get_collator_keys_from_seed::("Alice"), + get_collator_keys_from_seed::("Bob"), + ], + ) + }, Vec::new(), None, None, @@ -67,7 +78,15 @@ pub fn glutton_config(para_id: ParaId) -> GluttonChainSpec { // ID format!("glutton-kusama-{}", para_id).as_str(), ChainType::Live, - move || glutton_genesis(para_id), + move || { + glutton_genesis( + para_id, + vec![ + get_collator_keys_from_seed::("Alice"), + get_collator_keys_from_seed::("Bob"), + ], + ) + }, Vec::new(), None, // Protocol ID @@ -78,7 +97,10 @@ pub fn glutton_config(para_id: ParaId) -> GluttonChainSpec { ) } -fn glutton_genesis(parachain_id: ParaId) -> glutton_runtime::RuntimeGenesisConfig { +fn glutton_genesis( + parachain_id: ParaId, + collators: Vec, +) -> glutton_runtime::RuntimeGenesisConfig { glutton_runtime::RuntimeGenesisConfig { system: glutton_runtime::SystemConfig { code: glutton_runtime::WASM_BINARY @@ -94,6 +116,8 @@ fn glutton_genesis(parachain_id: ParaId) -> glutton_runtime::RuntimeGenesisConfi trash_data_count: Default::default(), ..Default::default() }, + aura: glutton_runtime::AuraConfig { authorities: collators }, + aura_ext: Default::default(), sudo: glutton_runtime::SudoConfig { key: Some(get_account_id_from_seed::("Alice")), }, diff --git a/cumulus/polkadot-parachain/src/chain_spec/seedling.rs b/cumulus/polkadot-parachain/src/chain_spec/seedling.rs index 3ebfb80d4685..6a5842320976 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/seedling.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/seedling.rs @@ -16,10 +16,12 @@ use crate::chain_spec::{get_account_id_from_seed, Extensions}; use cumulus_primitives_core::ParaId; -use parachains_common::AccountId; +use parachains_common::{AccountId, AuraId}; use sc_service::ChainType; use sp_core::sr25519; +use super::get_collator_keys_from_seed; + /// Specialized `ChainSpec` for the seedling parachain runtime. 
pub type SeedlingChainSpec = sc_service::GenericChainSpec; @@ -33,6 +35,7 @@ pub fn get_seedling_chain_spec() -> SeedlingChainSpec { seedling_testnet_genesis( get_account_id_from_seed::("Alice"), 2000.into(), + vec![get_collator_keys_from_seed::("Alice")], ) }, Vec::new(), @@ -47,6 +50,7 @@ pub fn get_seedling_chain_spec() -> SeedlingChainSpec { fn seedling_testnet_genesis( root_key: AccountId, parachain_id: ParaId, + collators: Vec, ) -> seedling_runtime::RuntimeGenesisConfig { seedling_runtime::RuntimeGenesisConfig { system: seedling_runtime::SystemConfig { @@ -61,5 +65,7 @@ fn seedling_testnet_genesis( ..Default::default() }, parachain_system: Default::default(), + aura: seedling_runtime::AuraConfig { authorities: collators }, + aura_ext: Default::default(), } } diff --git a/cumulus/polkadot-parachain/src/chain_spec/shell.rs b/cumulus/polkadot-parachain/src/chain_spec/shell.rs index 7eb65591b12f..0899c1af3b4d 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/shell.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/shell.rs @@ -16,8 +16,11 @@ use crate::chain_spec::Extensions; use cumulus_primitives_core::ParaId; +use parachains_common::AuraId; use sc_service::ChainType; +use super::get_collator_keys_from_seed; + /// Specialized `ChainSpec` for the shell parachain runtime. pub type ShellChainSpec = sc_service::GenericChainSpec; @@ -27,7 +30,9 @@ pub fn get_shell_chain_spec() -> ShellChainSpec { "Shell Local Testnet", "shell_local_testnet", ChainType::Local, - move || shell_testnet_genesis(1000.into()), + move || { + shell_testnet_genesis(1000.into(), vec![get_collator_keys_from_seed::("Alice")]) + }, Vec::new(), None, None, @@ -37,7 +42,10 @@ pub fn get_shell_chain_spec() -> ShellChainSpec { ) } -fn shell_testnet_genesis(parachain_id: ParaId) -> shell_runtime::RuntimeGenesisConfig { +fn shell_testnet_genesis( + parachain_id: ParaId, + collators: Vec, +) -> shell_runtime::RuntimeGenesisConfig { shell_runtime::RuntimeGenesisConfig { system: shell_runtime::SystemConfig { code: shell_runtime::WASM_BINARY @@ -47,5 +55,7 @@ fn shell_testnet_genesis(parachain_id: ParaId) -> shell_runtime::RuntimeGenesisC }, parachain_info: shell_runtime::ParachainInfoConfig { parachain_id, ..Default::default() }, parachain_system: Default::default(), + aura: shell_runtime::AuraConfig { authorities: collators }, + aura_ext: Default::default(), } }
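With the final patch applied, the glutton, seedling and shell genesis builders all take an explicit Aura authority list, and since no session manager is configured the keys supplied at genesis remain the authority set for the life of the chain. A minimal sketch, assuming the post-patch two-argument `shell_testnet_genesis` signature and the existing `get_collator_keys_from_seed` chain-spec helper shown in the diff above are in scope; this is an illustration, not part of the patch:

    // Minimal sketch: build a shell genesis config carrying a single, fixed Aura
    // authority, using the two-argument genesis function introduced by this patch.
    use cumulus_primitives_core::ParaId;
    use parachains_common::AuraId;

    fn example_shell_genesis(para_id: ParaId) -> shell_runtime::RuntimeGenesisConfig {
        // Aura runs without a session manager in these runtimes, so whatever keys
        // are placed in `AuraConfig.authorities` at genesis cannot be rotated later.
        let collators: Vec<AuraId> = vec![get_collator_keys_from_seed::<AuraId>("Alice")];
        shell_testnet_genesis(para_id, collators)
    }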