diff --git a/.config/nextest.toml b/.config/nextest.toml index 912bf2514a77..b4bdec4aea92 100644 --- a/.config/nextest.toml +++ b/.config/nextest.toml @@ -21,7 +21,6 @@ retries = 5 # The number of threads to run tests with. Supported values are either an integer or # the string "num-cpus". Can be overridden through the `--test-threads` option. # test-threads = "num-cpus" - test-threads = 20 # The number of threads required for each test. This is generally used in overrides to diff --git a/.github/env b/.github/env index bb61e1f4cd99..730c37f1db80 100644 --- a/.github/env +++ b/.github/env @@ -1 +1 @@ -IMAGE="docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-09-11-v202409111034" +IMAGE="docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-11-19-v202411281558" diff --git a/.github/scripts/cmd/cmd.py b/.github/scripts/cmd/cmd.py index 9da05cac17b9..2c017b7d0c3e 100755 --- a/.github/scripts/cmd/cmd.py +++ b/.github/scripts/cmd/cmd.py @@ -58,7 +58,7 @@ def setup_logging(): %(prog)s --runtime westend rococo --pallet pallet_balances pallet_multisig --quiet --clean ''' -parser_bench = subparsers.add_parser('bench', help='Runs benchmarks', epilog=bench_example, formatter_class=argparse.RawDescriptionHelpFormatter) +parser_bench = subparsers.add_parser('bench', help='Runs benchmarks (old CLI)', epilog=bench_example, formatter_class=argparse.RawDescriptionHelpFormatter) for arg, config in common_args.items(): parser_bench.add_argument(arg, **config) @@ -67,6 +67,35 @@ def setup_logging(): parser_bench.add_argument('--pallet', help='Pallet(s) space separated', nargs='*', default=[]) parser_bench.add_argument('--fail-fast', help='Fail fast on first failed benchmark', action='store_true') + +""" +BENCH OMNI +""" + +bench_example = '''**Examples**: + Runs all benchmarks + %(prog)s + + Runs benchmarks for pallet_balances and pallet_multisig for all runtimes which have these pallets. 
**--quiet** makes it output nothing to the PR but reactions + %(prog)s --pallet pallet_balances pallet_xcm_benchmarks::generic --quiet + + Runs bench for all pallets for westend runtime and fails fast on first failed benchmark + %(prog)s --runtime westend --fail-fast + + Does not output anything and cleans up the previous bot's & author command triggering comments in PR + %(prog)s --runtime westend rococo --pallet pallet_balances pallet_multisig --quiet --clean +''' + +parser_bench_old = subparsers.add_parser('bench-omni', help='Runs benchmarks (frame omni bencher)', epilog=bench_example, formatter_class=argparse.RawDescriptionHelpFormatter) + +for arg, config in common_args.items(): + parser_bench_old.add_argument(arg, **config) + +parser_bench_old.add_argument('--runtime', help='Runtime(s) space separated', choices=runtimeNames, nargs='*', default=runtimeNames) +parser_bench_old.add_argument('--pallet', help='Pallet(s) space separated', nargs='*', default=[]) +parser_bench_old.add_argument('--fail-fast', help='Fail fast on first failed benchmark', action='store_true') + + """ FMT """ @@ -98,12 +127,12 @@ def main(): print(f'args: {args}') - if args.command == 'bench': + if args.command == 'bench-omni': runtime_pallets_map = {} failed_benchmarks = {} successful_benchmarks = {} - profile = "release" + profile = "production" print(f'Provided runtimes: {args.runtime}') # convert to mapped dict @@ -113,11 +142,22 @@ def main(): # loop over remaining runtimes to collect available pallets for runtime in runtimesMatrix.values(): - os.system(f"forklift cargo build -p {runtime['package']} --profile {profile} --features={runtime['bench_features']}") + build_command = f"forklift cargo build -p {runtime['package']} --profile {profile} --features={runtime['bench_features']}" + print(f'-- building "{runtime["name"]}" with `{build_command}`') + os.system(build_command) print(f'-- listing pallets for benchmark for {runtime["name"]}') wasm_file = f"target/{profile}/wbuild/{runtime['package']}/{runtime['package'].replace('-', '_')}.wasm" - output = os.popen( - f"frame-omni-bencher v1 benchmark pallet --no-csv-header --no-storage-info --no-min-squares --no-median-slopes --all --list --runtime={wasm_file} {runtime['bench_flags']}").read() + list_command = f"frame-omni-bencher v1 benchmark pallet " \ + f"--no-csv-header " \ + f"--no-storage-info " \ + f"--no-min-squares " \ + f"--no-median-slopes " \ + f"--all " \ + f"--list " \ + f"--runtime={wasm_file} " \ + f"{runtime['bench_flags']}" + print(f'-- running: {list_command}') + output = os.popen(list_command).read() raw_pallets = output.strip().split('\n') all_pallets = set() @@ -230,6 +270,149 @@ def main(): print_and_log('✅ Successful benchmarks of runtimes/pallets:') for runtime, pallets in successful_benchmarks.items(): print_and_log(f'-- {runtime}: {pallets}') + + if args.command == 'bench': + runtime_pallets_map = {} + failed_benchmarks = {} + successful_benchmarks = {} + + profile = "production" + + print(f'Provided runtimes: {args.runtime}') + # convert to mapped dict + runtimesMatrix = list(filter(lambda x: x['name'] in args.runtime, runtimesMatrix)) + runtimesMatrix = {x['name']: x for x in runtimesMatrix} + print(f'Filtered out runtimes: {runtimesMatrix}') + + # loop over remaining runtimes to collect available pallets + for runtime in runtimesMatrix.values(): + build_command = f"forklift cargo build -p {runtime['old_package']} --profile {profile} --features={runtime['bench_features']} --locked" + print(f'-- building {runtime["name"]} with 
`{build_command}`') + os.system(build_command) + + chain = runtime['name'] if runtime['name'] == 'dev' else f"{runtime['name']}-dev" + + machine_test = f"target/{profile}/{runtime['old_bin']} benchmark machine --chain={chain}" + print(f"Running machine test for `{machine_test}`") + os.system(machine_test) + + print(f'-- listing pallets for benchmark for {chain}') + list_command = f"target/{profile}/{runtime['old_bin']} " \ + f"benchmark pallet " \ + f"--no-csv-header " \ + f"--no-storage-info " \ + f"--no-min-squares " \ + f"--no-median-slopes " \ + f"--all " \ + f"--list " \ + f"--chain={chain}" + print(f'-- running: {list_command}') + output = os.popen(list_command).read() + raw_pallets = output.strip().split('\n') + + all_pallets = set() + for pallet in raw_pallets: + if pallet: + all_pallets.add(pallet.split(',')[0].strip()) + + pallets = list(all_pallets) + print(f'Pallets in {runtime["name"]}: {pallets}') + runtime_pallets_map[runtime['name']] = pallets + + print(f'\n') + + # filter out only the specified pallets from collected runtimes/pallets + if args.pallet: + print(f'Pallets: {args.pallet}') + new_pallets_map = {} + # keep only specified pallets if they exist in the runtime + for runtime in runtime_pallets_map: + if set(args.pallet).issubset(set(runtime_pallets_map[runtime])): + new_pallets_map[runtime] = args.pallet + + runtime_pallets_map = new_pallets_map + + print(f'Filtered out runtimes & pallets: {runtime_pallets_map}\n') + + if not runtime_pallets_map: + if args.pallet and not args.runtime: + print(f"No pallets {args.pallet} found in any runtime") + elif args.runtime and not args.pallet: + print(f"{args.runtime} runtime does not have any pallets") + elif args.runtime and args.pallet: + print(f"No pallets {args.pallet} found in {args.runtime}") + else: + print('No runtimes found') + sys.exit(1) + + for runtime in runtime_pallets_map: + for pallet in runtime_pallets_map[runtime]: + config = runtimesMatrix[runtime] + header_path = os.path.abspath(config['header']) + template = None + + chain = config['name'] if runtime == 'dev' else f"{config['name']}-dev" + + print(f'-- config: {config}') + if runtime == 'dev': + # to support sub-modules (https://github.com/paritytech/command-bot/issues/275) + search_manifest_path = f"cargo metadata --locked --format-version 1 --no-deps | jq -r '.packages[] | select(.name == \"{pallet.replace('_', '-')}\") | .manifest_path'" + print(f'-- running: {search_manifest_path}') + manifest_path = os.popen(search_manifest_path).read() + if not manifest_path: + print(f'-- pallet {pallet} not found in dev runtime') + if args.fail_fast: + print_and_log(f'Error: {pallet} not found in dev runtime') + sys.exit(1) + package_dir = os.path.dirname(manifest_path) + print(f'-- package_dir: {package_dir}') + print(f'-- manifest_path: {manifest_path}') + output_path = os.path.join(package_dir, "src", "weights.rs") + template = config['template'] + else: + default_path = f"./{config['path']}/src/weights" + xcm_path = f"./{config['path']}/src/weights/xcm" + output_path = default_path + if pallet.startswith("pallet_xcm_benchmarks"): + template = config['template'] + output_path = xcm_path + + print(f'-- benchmarking {pallet} in {runtime} into {output_path}') + cmd = f"target/{profile}/{config['old_bin']} benchmark pallet " \ + f"--extrinsic=* " \ + f"--chain={chain} " \ + f"--pallet={pallet} " \ + f"--header={header_path} " \ + f"--output={output_path} " \ + f"--wasm-execution=compiled " \ + f"--steps=50 " \ + f"--repeat=20 " \ + f"--heap-pages=4096 " \ + 
f"{f'--template={template} ' if template else ''}" \ + f"--no-storage-info --no-min-squares --no-median-slopes " + print(f'-- Running: {cmd} \n') + status = os.system(cmd) + + if status != 0 and args.fail_fast: + print_and_log(f'❌ Failed to benchmark {pallet} in {runtime}') + sys.exit(1) + + # Otherwise collect failed benchmarks and print them at the end + # push failed pallets to failed_benchmarks + if status != 0: + failed_benchmarks[f'{runtime}'] = failed_benchmarks.get(f'{runtime}', []) + [pallet] + else: + successful_benchmarks[f'{runtime}'] = successful_benchmarks.get(f'{runtime}', []) + [pallet] + + if failed_benchmarks: + print_and_log('❌ Failed benchmarks of runtimes/pallets:') + for runtime, pallets in failed_benchmarks.items(): + print_and_log(f'-- {runtime}: {pallets}') + + if successful_benchmarks: + print_and_log('✅ Successful benchmarks of runtimes/pallets:') + for runtime, pallets in successful_benchmarks.items(): + print_and_log(f'-- {runtime}: {pallets}') elif args.command == 'fmt': command = f"cargo +nightly fmt" diff --git a/.github/scripts/cmd/test_cmd.py b/.github/scripts/cmd/test_cmd.py index 7b29fbfe90d8..68998b989909 100644 --- a/.github/scripts/cmd/test_cmd.py +++ b/.github/scripts/cmd/test_cmd.py @@ -47,7 +47,7 @@ def get_mock_bench_output(runtime, pallets, output_path, header, bench_flags, template = None): return f"frame-omni-bencher v1 benchmark pallet --extrinsic=* " \ - f"--runtime=target/release/wbuild/{runtime}-runtime/{runtime.replace('-', '_')}_runtime.wasm " \ + f"--runtime=target/production/wbuild/{runtime}-runtime/{runtime.replace('-', '_')}_runtime.wasm " \ f"--pallet={pallets} --header={header} " \ f"--output={output_path} " \ f"--wasm-execution=compiled " \ @@ -93,7 +93,7 @@ def tearDown(self): def test_bench_command_normal_execution_all_runtimes(self): self.mock_parse_args.return_value = (argparse.Namespace( - command='bench', + command='bench-omni', runtime=list(map(lambda x: x['name'], mock_runtimes_matrix)), pallet=['pallet_balances'], fail_fast=True, @@ -117,10 +117,10 @@ def test_bench_command_normal_execution_all_runtimes(self): expected_calls = [ # Build calls - call("forklift cargo build -p kitchensink-runtime --profile release --features=runtime-benchmarks"), - call("forklift cargo build -p westend-runtime --profile release --features=runtime-benchmarks"), - call("forklift cargo build -p rococo-runtime --profile release --features=runtime-benchmarks"), - call("forklift cargo build -p asset-hub-westend-runtime --profile release --features=runtime-benchmarks"), + call("forklift cargo build -p kitchensink-runtime --profile production --features=runtime-benchmarks"), + call("forklift cargo build -p westend-runtime --profile production --features=runtime-benchmarks"), + call("forklift cargo build -p rococo-runtime --profile production --features=runtime-benchmarks"), + call("forklift cargo build -p asset-hub-westend-runtime --profile production --features=runtime-benchmarks"), call(get_mock_bench_output( runtime='kitchensink', @@ -150,7 +150,7 @@ def test_bench_command_normal_execution_all_runtimes(self): def test_bench_command_normal_execution(self): self.mock_parse_args.return_value = (argparse.Namespace( - command='bench', + command='bench-omni', runtime=['westend'], pallet=['pallet_balances', 'pallet_staking'], fail_fast=True, @@ -170,7 +170,7 @@ def test_bench_command_normal_execution(self): expected_calls = [ # Build calls - call("forklift cargo build -p westend-runtime --profile release --features=runtime-benchmarks"), + call("forklift 
cargo build -p westend-runtime --profile production --features=runtime-benchmarks"), # Westend runtime calls call(get_mock_bench_output( @@ -193,7 +193,7 @@ def test_bench_command_normal_execution(self): def test_bench_command_normal_execution_xcm(self): self.mock_parse_args.return_value = (argparse.Namespace( - command='bench', + command='bench-omni', runtime=['westend'], pallet=['pallet_xcm_benchmarks::generic'], fail_fast=True, @@ -213,7 +213,7 @@ def test_bench_command_normal_execution_xcm(self): expected_calls = [ # Build calls - call("forklift cargo build -p westend-runtime --profile release --features=runtime-benchmarks"), + call("forklift cargo build -p westend-runtime --profile production --features=runtime-benchmarks"), # Westend runtime calls call(get_mock_bench_output( @@ -229,7 +229,7 @@ def test_bench_command_normal_execution_xcm(self): def test_bench_command_two_runtimes_two_pallets(self): self.mock_parse_args.return_value = (argparse.Namespace( - command='bench', + command='bench-omni', runtime=['westend', 'rococo'], pallet=['pallet_balances', 'pallet_staking'], fail_fast=True, @@ -250,8 +250,8 @@ def test_bench_command_two_runtimes_two_pallets(self): expected_calls = [ # Build calls - call("forklift cargo build -p westend-runtime --profile release --features=runtime-benchmarks"), - call("forklift cargo build -p rococo-runtime --profile release --features=runtime-benchmarks"), + call("forklift cargo build -p westend-runtime --profile production --features=runtime-benchmarks"), + call("forklift cargo build -p rococo-runtime --profile production --features=runtime-benchmarks"), # Westend runtime calls call(get_mock_bench_output( runtime='westend', @@ -287,7 +287,7 @@ def test_bench_command_two_runtimes_two_pallets(self): def test_bench_command_one_dev_runtime(self): self.mock_parse_args.return_value = (argparse.Namespace( - command='bench', + command='bench-omni', runtime=['dev'], pallet=['pallet_balances'], fail_fast=True, @@ -309,7 +309,7 @@ def test_bench_command_one_dev_runtime(self): expected_calls = [ # Build calls - call("forklift cargo build -p kitchensink-runtime --profile release --features=runtime-benchmarks"), + call("forklift cargo build -p kitchensink-runtime --profile production --features=runtime-benchmarks"), # Westend runtime calls call(get_mock_bench_output( runtime='kitchensink', @@ -324,7 +324,7 @@ def test_bench_command_one_dev_runtime(self): def test_bench_command_one_cumulus_runtime(self): self.mock_parse_args.return_value = (argparse.Namespace( - command='bench', + command='bench-omni', runtime=['asset-hub-westend'], pallet=['pallet_assets'], fail_fast=True, @@ -344,7 +344,7 @@ def test_bench_command_one_cumulus_runtime(self): expected_calls = [ # Build calls - call("forklift cargo build -p asset-hub-westend-runtime --profile release --features=runtime-benchmarks"), + call("forklift cargo build -p asset-hub-westend-runtime --profile production --features=runtime-benchmarks"), # Asset-hub-westend runtime calls call(get_mock_bench_output( runtime='asset-hub-westend', @@ -359,7 +359,7 @@ def test_bench_command_one_cumulus_runtime(self): def test_bench_command_one_cumulus_runtime_xcm(self): self.mock_parse_args.return_value = (argparse.Namespace( - command='bench', + command='bench-omni', runtime=['asset-hub-westend'], pallet=['pallet_xcm_benchmarks::generic', 'pallet_assets'], fail_fast=True, @@ -379,7 +379,7 @@ def test_bench_command_one_cumulus_runtime_xcm(self): expected_calls = [ # Build calls - call("forklift cargo build -p asset-hub-westend-runtime 
--profile release --features=runtime-benchmarks"), + call("forklift cargo build -p asset-hub-westend-runtime --profile production --features=runtime-benchmarks"), # Asset-hub-westend runtime calls call(get_mock_bench_output( runtime='asset-hub-westend', diff --git a/.github/scripts/common/lib.sh b/.github/scripts/common/lib.sh index e3dd6224f29b..00f8c089831e 100755 --- a/.github/scripts/common/lib.sh +++ b/.github/scripts/common/lib.sh @@ -237,24 +237,52 @@ fetch_release_artifacts() { popd > /dev/null } -# Fetch the release artifacts like binary and signatures from S3. Assumes the ENV are set: +# Fetch deb package from S3. Assumes the ENV are set: # - RELEASE_ID # - GITHUB_TOKEN # - REPO in the form paritytech/polkadot -fetch_release_artifacts_from_s3() { +fetch_debian_package_from_s3() { BINARY=$1 echo "Version : $VERSION" echo "Repo : $REPO" echo "Binary : $BINARY" + echo "Tag : $RELEASE_TAG" OUTPUT_DIR=${OUTPUT_DIR:-"./release-artifacts/${BINARY}"} echo "OUTPUT_DIR : $OUTPUT_DIR" URL_BASE=$(get_s3_url_base $BINARY) echo "URL_BASE=$URL_BASE" - URL_BINARY=$URL_BASE/$VERSION/$BINARY - URL_SHA=$URL_BASE/$VERSION/$BINARY.sha256 - URL_ASC=$URL_BASE/$VERSION/$BINARY.asc + URL=$URL_BASE/$RELEASE_TAG/x86_64-unknown-linux-gnu/${BINARY}_${VERSION}_amd64.deb + + mkdir -p "$OUTPUT_DIR" + pushd "$OUTPUT_DIR" > /dev/null + + echo "Fetching deb package..." + + echo "Fetching $URL" + curl --progress-bar -LO "$URL" || echo "Missing $URL" + + pwd + ls -al --color + popd > /dev/null + +} + +# Fetch the release artifacts like binary and signatures from S3. Assumes the ENV are set: +# inputs: binary (polkadot), target (aarch64-apple-darwin) +fetch_release_artifacts_from_s3() { + BINARY=$1 + TARGET=$2 + OUTPUT_DIR=${OUTPUT_DIR:-"./release-artifacts/${TARGET}/${BINARY}"} + echo "OUTPUT_DIR : $OUTPUT_DIR" + + URL_BASE=$(get_s3_url_base $BINARY) + echo "URL_BASE=$URL_BASE" + + URL_BINARY=$URL_BASE/$VERSION/$TARGET/$BINARY + URL_SHA=$URL_BASE/$VERSION/$TARGET/$BINARY.sha256 + URL_ASC=$URL_BASE/$VERSION/$TARGET/$BINARY.asc # Fetch artifacts mkdir -p "$OUTPUT_DIR" @@ -269,7 +297,7 @@ fetch_release_artifacts_from_s3() { pwd ls -al --color popd > /dev/null - + unset OUTPUT_DIR } # Pass the name of the binary as input, it will @@ -277,15 +305,26 @@ fetch_release_artifacts_from_s3() { function get_s3_url_base() { name=$1 case $name in - polkadot | polkadot-execute-worker | polkadot-prepare-worker | staking-miner) + polkadot | polkadot-execute-worker | polkadot-prepare-worker ) printf "https://releases.parity.io/polkadot" ;; - polkadot-parachain) - printf "https://releases.parity.io/cumulus" + polkadot-parachain) + printf "https://releases.parity.io/polkadot-parachain" + ;; + + polkadot-omni-node) + printf "https://releases.parity.io/polkadot-omni-node" + ;; - *) + chain-spec-builder) + printf "https://releases.parity.io/chain-spec-builder" + ;; + + frame-omni-bencher) + printf "https://releases.parity.io/frame-omni-bencher" + ;; + *) printf "UNSUPPORTED BINARY $name" exit 1 ;; @@ -468,3 +507,16 @@ validate_stable_tag() { exit 1 fi } + +# Prepare docker stable tag from the polkadot stable tag +# input: tag (polkadot-stableYYMM(-X) or polkadot-stableYYMM(-X)-rcX) +# output: stableYYMM(-X) or stableYYMM(-X)-rcX +prepare_docker_stable_tag() { + tag="$1" + if [[ "$tag" =~ stable[0-9]{4}(-[0-9]+)?(-rc[0-9]+)? 
]]; then + echo "${BASH_REMATCH[0]}" + else + echo "Tag is invalid: $tag" + exit 1 + fi +} diff --git a/.github/scripts/release/build-linux-release.sh b/.github/scripts/release/build-linux-release.sh index a6bd658d292a..874c9b44788b 100755 --- a/.github/scripts/release/build-linux-release.sh +++ b/.github/scripts/release/build-linux-release.sh @@ -3,6 +3,8 @@ # This is used to build our binaries: # - polkadot # - polkadot-parachain +# - polkadot-omni-node +# # set -e BIN=$1 @@ -21,7 +23,7 @@ time cargo build --profile $PROFILE --locked --verbose --bin $BIN --package $PAC echo "Artifact target: $ARTIFACTS" cp ./target/$PROFILE/$BIN "$ARTIFACTS" -pushd "$ARTIFACTS" > /dev/nul +pushd "$ARTIFACTS" > /dev/null sha256sum "$BIN" | tee "$BIN.sha256" EXTRATAG="$($ARTIFACTS/$BIN --version | diff --git a/.github/scripts/release/build-macos-release.sh b/.github/scripts/release/build-macos-release.sh new file mode 100755 index 000000000000..ba6dcc65d650 --- /dev/null +++ b/.github/scripts/release/build-macos-release.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash + +# This is used to build our binaries: +# - polkadot +# - polkadot-parachain +# - polkadot-omni-node +# set -e + +BIN=$1 +PACKAGE=${2:-$BIN} + +PROFILE=${PROFILE:-production} +# parity-macos runner needs a path where it can +# write, so make it relative to github workspace. +ARTIFACTS=$GITHUB_WORKSPACE/artifacts/$BIN +VERSION=$(git tag -l --contains HEAD | grep -E "^v.*") + +echo "Artifacts will be copied into $ARTIFACTS" +mkdir -p "$ARTIFACTS" + +git log --pretty=oneline -n 1 +time cargo build --profile $PROFILE --locked --verbose --bin $BIN --package $PACKAGE + +echo "Artifact target: $ARTIFACTS" + +cp ./target/$PROFILE/$BIN "$ARTIFACTS" +pushd "$ARTIFACTS" > /dev/null +sha256sum "$BIN" | tee "$BIN.sha256" + +EXTRATAG="$($ARTIFACTS/$BIN --version | + sed -n -r 's/^'$BIN' ([0-9.]+.*-[0-9a-f]{7,13})-.*$/\1/p')" + +EXTRATAG="${VERSION}-${EXTRATAG}-$(cut -c 1-8 $ARTIFACTS/$BIN.sha256)" + +echo "$BIN version = ${VERSION} (EXTRATAG = ${EXTRATAG})" +echo -n ${VERSION} > "$ARTIFACTS/VERSION" +echo -n ${EXTRATAG} > "$ARTIFACTS/EXTRATAG" diff --git a/.github/scripts/release/distributions b/.github/scripts/release/distributions new file mode 100644 index 000000000000..a430ec76c6ba --- /dev/null +++ b/.github/scripts/release/distributions @@ -0,0 +1,39 @@ +Origin: Parity +Label: Parity +Codename: release +Architectures: amd64 +Components: main +Description: Apt repository for software made by Parity Technologies Ltd. +SignWith: 90BD75EBBB8E95CB3DA6078F94A4029AB4B35DAE + +Origin: Parity +Label: Parity Staging +Codename: staging +Architectures: amd64 +Components: main +Description: Staging distribution for Parity Technologies Ltd. packages +SignWith: 90BD75EBBB8E95CB3DA6078F94A4029AB4B35DAE + +Origin: Parity +Label: Parity stable2407 +Codename: stable2407 +Architectures: amd64 +Components: main +Description: Apt repository for software made by Parity Technologies Ltd. +SignWith: 90BD75EBBB8E95CB3DA6078F94A4029AB4B35DAE + +Origin: Parity +Label: Parity stable2409 +Codename: stable2409 +Architectures: amd64 +Components: main +Description: Apt repository for software made by Parity Technologies Ltd. +SignWith: 90BD75EBBB8E95CB3DA6078F94A4029AB4B35DAE + +Origin: Parity +Label: Parity stable2412 +Codename: stable2412 +Architectures: amd64 +Components: main +Description: Apt repository for software made by Parity Technologies Ltd. 
+SignWith: 90BD75EBBB8E95CB3DA6078F94A4029AB4B35DAE diff --git a/.github/scripts/release/release_lib.sh b/.github/scripts/release/release_lib.sh index f5032073b617..984709f2ea03 100644 --- a/.github/scripts/release/release_lib.sh +++ b/.github/scripts/release/release_lib.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# Set the new version by replacing the value of the constant given as patetrn +# Set the new version by replacing the value of the constant given as pattern # in the file. # # input: pattern, version, file @@ -119,21 +119,79 @@ set_polkadot_parachain_binary_version() { upload_s3_release() { + alias aws='podman run --rm -it docker.io/paritytech/awscli -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e AWS_BUCKET aws' + + product=$1 + version=$2 + target=$3 + + echo "Working on product: $product " + echo "Working on version: $version " + echo "Working on platform: $target " + + URL_BASE=$(get_s3_url_base $product) + + echo "Current content, should be empty on new uploads:" + aws s3 ls "s3://${URL_BASE}/${version}/${target}" --recursive --human-readable --summarize || true + echo "Content to be uploaded:" + artifacts="release-artifacts/$target/$product/" + ls "$artifacts" + aws s3 sync --acl public-read "$artifacts" "s3://${URL_BASE}/${version}/${target}" + echo "Uploaded files:" + aws s3 ls "s3://${URL_BASE}/${version}/${target}" --recursive --human-readable --summarize + echo "✅ The release should be at https://${URL_BASE}/${version}/${target}" +} + +# Upload runtime artifacts to the s3 release bucket +# +# input: version (stable release tag, e.g. polkadot-stable2412 or polkadot-stable2412-rc1) +# output: none +upload_s3_runtimes_release_artifacts() { alias aws='podman run --rm -it docker.io/paritytech/awscli -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e AWS_BUCKET aws' - product=$1 - version=$2 + version=$1 - echo "Working on product: $product " echo "Working on version: $version " echo "Current content, should be empty on new uploads:" - aws s3 ls "s3://releases.parity.io/polkadot/${version}/" --recursive --human-readable --summarize || true + aws s3 ls "s3://releases.parity.io/polkadot/runtimes/${version}/" --recursive --human-readable --summarize || true echo "Content to be uploaded:" - artifacts="artifacts/$product/" + artifacts="artifacts/runtimes/" ls "$artifacts" - aws s3 sync --acl public-read "$artifacts" "s3://releases.parity.io/polkadot/${version}/" + aws s3 sync --acl public-read "$artifacts" "s3://releases.parity.io/polkadot/runtimes/${version}/" echo "Uploaded files:" - aws s3 ls "s3://releases.parity.io/polkadot/${version}/" --recursive --human-readable --summarize - echo "✅ The release should be at https://releases.parity.io/polkadot/${version}" + aws s3 ls "s3://releases.parity.io/polkadot/runtimes/${version}/" --recursive --human-readable --summarize + echo "✅ The release should be at https://releases.parity.io/polkadot/runtimes/${version}" +} + + +# Pass the name of the binary as input, it will +# return the s3 base url +function get_s3_url_base() { + name=$1 + case $name in + polkadot | polkadot-execute-worker | polkadot-prepare-worker ) + printf "releases.parity.io/polkadot" + ;; + + polkadot-parachain) + printf "releases.parity.io/polkadot-parachain" + ;; + + polkadot-omni-node) + printf "releases.parity.io/polkadot-omni-node" + ;; + + chain-spec-builder) + printf "releases.parity.io/chain-spec-builder" + ;; + + frame-omni-bencher) + printf "releases.parity.io/frame-omni-bencher" + ;; + *) + printf "UNSUPPORTED BINARY $name" + exit 1 + ;; + esac } diff --git 
a/.github/workflows/build-misc.yml b/.github/workflows/build-misc.yml index a9b433a94b64..335c26282027 100644 --- a/.github/workflows/build-misc.yml +++ b/.github/workflows/build-misc.yml @@ -20,7 +20,7 @@ jobs: uses: ./.github/workflows/reusable-preflight.yml build-runtimes-polkavm: - timeout-minutes: 20 + timeout-minutes: 60 needs: [preflight] runs-on: ${{ needs.preflight.outputs.RUNNER }} container: @@ -38,13 +38,9 @@ jobs: env: SUBSTRATE_RUNTIME_TARGET: riscv id: required - run: | - forklift cargo check -p minimal-template-runtime - forklift cargo check -p westend-runtime - forklift cargo check -p rococo-runtime - forklift cargo check -p polkadot-test-runtime + run: forklift cargo check -p minimal-template-runtime -p westend-runtime -p rococo-runtime -p polkadot-test-runtime - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' }} + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} uses: ./.github/actions/workflow-stopper with: app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} @@ -73,7 +69,7 @@ jobs: cd ./substrate/bin/utils/subkey forklift cargo build --locked --release - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' }} + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} uses: ./.github/actions/workflow-stopper with: app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} diff --git a/.github/workflows/check-frame-omni-bencher.yml b/.github/workflows/check-frame-omni-bencher.yml index b47c9d49feaf..bc0ff82b6774 100644 --- a/.github/workflows/check-frame-omni-bencher.yml +++ b/.github/workflows/check-frame-omni-bencher.yml @@ -41,7 +41,7 @@ jobs: forklift cargo build --locked --quiet --release -p asset-hub-westend-runtime --features runtime-benchmarks forklift cargo run --locked --release -p frame-omni-bencher --quiet -- v1 benchmark pallet --runtime target/release/wbuild/asset-hub-westend-runtime/asset_hub_westend_runtime.compact.compressed.wasm --all --steps 2 --repeat 1 --quiet - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' }} + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} uses: ./.github/actions/workflow-stopper with: app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} @@ -99,7 +99,7 @@ jobs: echo "Running command: $cmd" eval "$cmd" - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' }} + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} uses: ./.github/actions/workflow-stopper with: app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} diff --git a/.github/workflows/check-links.yml b/.github/workflows/check-links.yml index dd9d3eaf824f..cea6b9a8636a 100644 --- a/.github/workflows/check-links.yml +++ b/.github/workflows/check-links.yml @@ -33,7 +33,7 @@ jobs: - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.0 (22. Sep 2023) - name: Lychee link checker - uses: lycheeverse/lychee-action@7cd0af4c74a61395d455af97419279d86aafaede # for v1.9.1 (10. Jan 2024) + uses: lycheeverse/lychee-action@f81112d0d2814ded911bd23e3beaa9dda9093915 # for v1.9.1 (10. 
Jan 2024) with: args: >- --config .config/lychee.toml diff --git a/.github/workflows/check-semver.yml b/.github/workflows/check-semver.yml index 78602410cdf6..16028c8de770 100644 --- a/.github/workflows/check-semver.yml +++ b/.github/workflows/check-semver.yml @@ -11,7 +11,7 @@ concurrency: cancel-in-progress: true env: - TOOLCHAIN: nightly-2024-06-01 + TOOLCHAIN: nightly-2024-11-19 jobs: preflight: @@ -74,10 +74,15 @@ jobs: - name: install parity-publish # Set the target dir to cache the build. - run: CARGO_TARGET_DIR=./target/ cargo install parity-publish@0.8.0 --locked -q + run: CARGO_TARGET_DIR=./target/ cargo install parity-publish@0.10.3 --locked -q - name: check semver run: | + if [ -z "$PR" ]; then + echo "Skipping master/merge queue" + exit 0 + fi + export CARGO_TARGET_DIR=target export RUSTFLAGS='-A warnings -A missing_docs' export SKIP_WASM_BUILD=1 diff --git a/.github/workflows/checks-quick.yml b/.github/workflows/checks-quick.yml index 4fcaf80c83fc..4c26b85a6303 100644 --- a/.github/workflows/checks-quick.yml +++ b/.github/workflows/checks-quick.yml @@ -30,7 +30,7 @@ jobs: id: required run: cargo +nightly fmt --all -- --check - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' }} + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} uses: ./.github/actions/workflow-stopper with: app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} @@ -97,7 +97,6 @@ jobs: --exclude "substrate/frame/contracts/fixtures/build" "substrate/frame/contracts/fixtures/contracts/common" - "substrate/frame/revive/fixtures/build" "substrate/frame/revive/fixtures/contracts/common" - name: deny git deps run: python3 .github/scripts/deny-git-deps.py . diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml index c240504fa1e7..02428711811f 100644 --- a/.github/workflows/checks.yml +++ b/.github/workflows/checks.yml @@ -36,7 +36,7 @@ jobs: cargo clippy --all-targets --locked --workspace --quiet cargo clippy --all-targets --all-features --locked --workspace --quiet - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' }} + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} uses: ./.github/actions/workflow-stopper with: app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} @@ -62,7 +62,7 @@ jobs: # experimental code may rely on try-runtime and vice-versa forklift cargo check --locked --all --features try-runtime,experimental --quiet - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' }} + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} uses: ./.github/actions/workflow-stopper with: app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} @@ -91,7 +91,7 @@ jobs: ./check-features-variants.sh cd - - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' }} + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} uses: ./.github/actions/workflow-stopper with: app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} diff --git a/.github/workflows/cmd.yml b/.github/workflows/cmd.yml index 525ab0c0fc23..b6a50ea0d15e 100644 --- a/.github/workflows/cmd.yml +++ b/.github/workflows/cmd.yml @@ -19,10 +19,10 @@ jobs: steps: - name: Generate token id: generate_token - uses: tibdex/github-app-token@v2.1.0 + uses: 
actions/create-github-app-token@v1 with: - app_id: ${{ secrets.CMD_BOT_APP_ID }} - private_key: ${{ secrets.CMD_BOT_APP_KEY }} + app-id: ${{ secrets.CMD_BOT_APP_ID }} + private-key: ${{ secrets.CMD_BOT_APP_KEY }} - name: Check if user is a member of the organization id: is-member @@ -227,7 +227,8 @@ jobs: cat .github/env >> $GITHUB_OUTPUT if [ -n "$IMAGE_OVERRIDE" ]; then - echo "IMAGE=$IMAGE_OVERRIDE" >> $GITHUB_OUTPUT + IMAGE=$IMAGE_OVERRIDE + echo "IMAGE=$IMAGE" >> $GITHUB_OUTPUT fi if [[ $BODY == "/cmd bench"* ]]; then @@ -237,6 +238,10 @@ jobs: else echo "RUNNER=ubuntu-latest" >> $GITHUB_OUTPUT fi + - name: Print outputs + run: | + echo "RUNNER=${{ steps.set-image.outputs.RUNNER }}" + echo "IMAGE=${{ steps.set-image.outputs.IMAGE }}" # Get PR branch name, because the issue_comment event does not contain the PR branch name get-pr-branch: @@ -283,10 +288,24 @@ jobs: env: JOB_NAME: "cmd" runs-on: ${{ needs.set-image.outputs.RUNNER }} - timeout-minutes: 4320 # 72 hours -> 3 days; as it could take a long time to run all the runtimes/pallets container: image: ${{ needs.set-image.outputs.IMAGE }} + timeout-minutes: 1440 # 24 hours per runtime steps: + - name: Generate token + uses: actions/create-github-app-token@v1 + id: generate_token + with: + app-id: ${{ secrets.CMD_BOT_APP_ID }} + private-key: ${{ secrets.CMD_BOT_APP_KEY }} + + - name: Checkout + uses: actions/checkout@v4 + with: + token: ${{ steps.generate_token.outputs.token }} + repository: ${{ needs.get-pr-branch.outputs.repo }} + ref: ${{ needs.get-pr-branch.outputs.pr-branch }} + - name: Get command uses: actions-ecosystem/action-regex-match@v2 id: get-pr-comment @@ -340,13 +359,7 @@ jobs: repo: context.repo.repo, body: `Command "${{ steps.get-pr-comment.outputs.group2 }}" has started 🚀 [See logs here](${job_url})` }) - - - name: Checkout - uses: actions/checkout@v4 - with: - repository: ${{ needs.get-pr-branch.outputs.repo }} - ref: ${{ needs.get-pr-branch.outputs.pr-branch }} - + - name: Install dependencies for bench if: startsWith(steps.get-pr-comment.outputs.group2, 'bench') run: | @@ -364,6 +377,7 @@ jobs: # Fixes "detected dubious ownership" error in the ci git config --global --add safe.directory '*' git remote -v + cat /proc/cpuinfo python3 -m pip install -r .github/scripts/generate-prdoc.requirements.txt python3 .github/scripts/cmd/cmd.py $CMD $PR_ARG git status @@ -389,16 +403,30 @@ jobs: - name: Commit changes run: | if [ -n "$(git status --porcelain)" ]; then - git config --local user.email "action@github.com" - git config --local user.name "GitHub Action" + git config --global user.name command-bot + git config --global user.email "<>" + git config --global pull.rebase false + + # Push the results to the target branch + git remote add \ + github \ + "https://token:${{ steps.generate_token.outputs.token }}@github.com/${{ github.event.repository.owner.login }}/${{ github.event.repository.name }}.git" || : + + push_changes() { + git push github "HEAD:${{ needs.get-pr-branch.outputs.pr-branch }}" + } git add . git restore --staged Cargo.lock # ignore changes in Cargo.lock git commit -m "Update from ${{ github.actor }} running command '${{ steps.get-pr-comment.outputs.group2 }}'" || true - git pull --rebase origin ${{ needs.get-pr-branch.outputs.pr-branch }} - - git push origin ${{ needs.get-pr-branch.outputs.pr-branch }} + # Attempt to push changes + if ! push_changes; then + echo "Push failed, trying to rebase..." 
+ git pull --rebase github "${{ needs.get-pr-branch.outputs.pr-branch }}" + # After successful rebase, try pushing again + push_changes + fi else echo "Nothing to commit"; fi diff --git a/.github/workflows/command-backport.yml b/.github/workflows/command-backport.yml index 8f23bcd75f01..8a017a434525 100644 --- a/.github/workflows/command-backport.yml +++ b/.github/workflows/command-backport.yml @@ -40,7 +40,7 @@ jobs: uses: korthout/backport-action@v3 id: backport with: - target_branches: stable2407 stable2409 + target_branches: stable2407 stable2409 stable2412 merge_commits: skip github_token: ${{ steps.generate_token.outputs.token }} pull_description: | @@ -86,7 +86,7 @@ jobs: const reviewer = '${{ github.event.pull_request.user.login }}'; for (const pullNumber of pullNumbers) { - await github.pulls.createReviewRequest({ + await github.pulls.requestReviewers({ owner: context.repo.owner, repo: context.repo.repo, pull_number: parseInt(pullNumber), diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index cc84e7f9ad3b..b7c70c9e6d66 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -29,7 +29,7 @@ jobs: env: RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' }} + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} uses: ./.github/actions/workflow-stopper with: app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} @@ -69,7 +69,7 @@ jobs: retention-days: 1 if-no-files-found: error - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' }} + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} uses: ./.github/actions/workflow-stopper with: app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} diff --git a/.github/workflows/publish-check-compile.yml b/.github/workflows/publish-check-compile.yml new file mode 100644 index 000000000000..ada8635e314e --- /dev/null +++ b/.github/workflows/publish-check-compile.yml @@ -0,0 +1,48 @@ +name: Check publish build + +on: + push: + branches: + - master + pull_request: + types: [opened, synchronize, reopened, ready_for_review] + merge_group: + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +jobs: + preflight: + uses: ./.github/workflows/reusable-preflight.yml + + check-publish: + timeout-minutes: 90 + needs: [preflight] + runs-on: ${{ needs.preflight.outputs.RUNNER }} + container: + image: ${{ needs.preflight.outputs.IMAGE }} + steps: + - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 + + - name: Rust Cache + uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5 + with: + cache-on-failure: true + + - name: install parity-publish + run: cargo install parity-publish@0.10.3 --locked -q + + - name: parity-publish update plan + run: parity-publish --color always plan --skip-check --prdoc prdoc/ + + - name: parity-publish apply plan + run: parity-publish --color always apply --registry + + - name: parity-publish check compile + run: | + packages="$(parity-publish apply --print)" + + if [ -n "$packages" ]; then + cargo --color always check $(printf -- '-p %s ' $packages) + fi diff --git a/.github/workflows/publish-check-crates.yml b/.github/workflows/publish-check-crates.yml index 3fad3b641474..3150cb9dd405 100644 --- 
a/.github/workflows/publish-check-crates.yml +++ b/.github/workflows/publish-check-crates.yml @@ -24,7 +24,7 @@ jobs: cache-on-failure: true - name: install parity-publish - run: cargo install parity-publish@0.8.0 --locked -q + run: cargo install parity-publish@0.10.3 --locked -q - name: parity-publish check run: parity-publish --color always check --allow-unpublished diff --git a/.github/workflows/publish-claim-crates.yml b/.github/workflows/publish-claim-crates.yml index 37bf06bb82d8..a6efc8a5599e 100644 --- a/.github/workflows/publish-claim-crates.yml +++ b/.github/workflows/publish-claim-crates.yml @@ -18,7 +18,7 @@ jobs: cache-on-failure: true - name: install parity-publish - run: cargo install parity-publish@0.8.0 --locked -q + run: cargo install parity-publish@0.10.3 --locked -q - name: parity-publish claim env: diff --git a/.github/workflows/release-branchoff-stable.yml b/.github/workflows/release-10_branchoff-stable.yml similarity index 100% rename from .github/workflows/release-branchoff-stable.yml rename to .github/workflows/release-10_branchoff-stable.yml diff --git a/.github/workflows/release-10_rc-automation.yml b/.github/workflows/release-11_rc-automation.yml similarity index 100% rename from .github/workflows/release-10_rc-automation.yml rename to .github/workflows/release-11_rc-automation.yml diff --git a/.github/workflows/release-20_build-rc.yml b/.github/workflows/release-20_build-rc.yml new file mode 100644 index 000000000000..d4c7055c37c5 --- /dev/null +++ b/.github/workflows/release-20_build-rc.yml @@ -0,0 +1,263 @@ +name: Release - Build node release candidate + +on: + workflow_dispatch: + inputs: + binary: + description: Binary to be built for the release + default: all + type: choice + options: + - polkadot + - polkadot-parachain + - polkadot-omni-node + - frame-omni-bencher + - chain-spec-builder + - all + + release_tag: + description: Tag matching the actual release candidate with the format polkadot-stableYYMM(-X)-rcX or polkadot-stableYYMM(-X) + type: string + +jobs: + check-synchronization: + uses: paritytech-release/sync-workflows/.github/workflows/check-syncronization.yml@main + + validate-inputs: + needs: [check-synchronization] + if: ${{ needs.check-synchronization.outputs.checks_passed == 'true' }} + runs-on: ubuntu-latest + outputs: + release_tag: ${{ steps.validate_inputs.outputs.release_tag }} + + steps: + - name: Checkout sources + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + + - name: Validate inputs + id: validate_inputs + run: | + . 
./.github/scripts/common/lib.sh + + RELEASE_TAG=$(validate_stable_tag ${{ inputs.release_tag }}) + echo "release_tag=${RELEASE_TAG}" >> $GITHUB_OUTPUT + + build-polkadot-binary: + needs: [validate-inputs] + if: ${{ inputs.binary == 'polkadot' || inputs.binary == 'all' }} + uses: "./.github/workflows/release-reusable-rc-buid.yml" + with: + binary: '["polkadot", "polkadot-prepare-worker", "polkadot-execute-worker"]' + package: polkadot + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: x86_64-unknown-linux-gnu + secrets: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + permissions: + id-token: write + attestations: write + contents: read + + build-polkadot-parachain-binary: + needs: [validate-inputs] + if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'all' }} + uses: "./.github/workflows/release-reusable-rc-buid.yml" + with: + binary: '["polkadot-parachain"]' + package: "polkadot-parachain-bin" + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: x86_64-unknown-linux-gnu + secrets: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + permissions: + id-token: write + attestations: write + contents: read + + build-polkadot-omni-node-binary: + needs: [validate-inputs] + if: ${{ inputs.binary == 'polkadot-omni-node' || inputs.binary == 'all' }} + uses: "./.github/workflows/release-reusable-rc-buid.yml" + with: + binary: '["polkadot-omni-node"]' + package: "polkadot-omni-node" + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: x86_64-unknown-linux-gnu + secrets: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + permissions: + id-token: write + attestations: write + contents: read + + build-frame-omni-bencher-binary: + needs: [validate-inputs] + if: ${{ inputs.binary == 'frame-omni-bencher' || inputs.binary == 'all' }} + uses: "./.github/workflows/release-reusable-rc-buid.yml" + with: + binary: '["frame-omni-bencher"]' + package: "frame-omni-bencher" + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: x86_64-unknown-linux-gnu + secrets: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + 
permissions: + id-token: write + attestations: write + contents: read + + build-chain-spec-builder-binary: + needs: [validate-inputs] + if: ${{ inputs.binary == 'chain-spec-builder' || inputs.binary == 'all' }} + uses: "./.github/workflows/release-reusable-rc-buid.yml" + with: + binary: '["chain-spec-builder"]' + package: staging-chain-spec-builder + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: x86_64-unknown-linux-gnu + secrets: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + permissions: + id-token: write + attestations: write + contents: read + + build-polkadot-macos-binary: + needs: [validate-inputs] + if: ${{ inputs.binary == 'polkadot' || inputs.binary == 'all' }} + uses: "./.github/workflows/release-reusable-rc-buid.yml" + with: + binary: '["polkadot", "polkadot-prepare-worker", "polkadot-execute-worker"]' + package: polkadot + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: aarch64-apple-darwin + secrets: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + permissions: + id-token: write + attestations: write + contents: read + + build-polkadot-parachain-macos-binary: + needs: [validate-inputs] + if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'all' }} + uses: "./.github/workflows/release-reusable-rc-buid.yml" + with: + binary: '["polkadot-parachain"]' + package: polkadot-parachain-bin + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: aarch64-apple-darwin + secrets: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + permissions: + id-token: write + attestations: write + contents: read + + build-polkadot-omni-node-macos-binary: + needs: [validate-inputs] + if: ${{ inputs.binary == 'polkadot-omni-node' || inputs.binary == 'all' }} + uses: "./.github/workflows/release-reusable-rc-buid.yml" + with: + binary: '["polkadot-omni-node"]' + package: polkadot-omni-node + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: aarch64-apple-darwin + secrets: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + permissions: + id-token: write + attestations: write + contents: read + + 
build-frame-omni-bencher-macos-binary: + needs: [validate-inputs] + if: ${{ inputs.binary == 'frame-omni-bencher' || inputs.binary == 'all' }} + uses: "./.github/workflows/release-reusable-rc-buid.yml" + with: + binary: '["frame-omni-bencher"]' + package: frame-omni-bencher + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: aarch64-apple-darwin + secrets: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + permissions: + id-token: write + attestations: write + contents: read + + build-chain-spec-builder-macos-binary: + needs: [validate-inputs] + if: ${{ inputs.binary == 'chain-spec-builder' || inputs.binary == 'all' }} + uses: "./.github/workflows/release-reusable-rc-buid.yml" + with: + binary: '["chain-spec-builder"]' + package: staging-chain-spec-builder + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: aarch64-apple-darwin + secrets: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + permissions: + id-token: write + attestations: write + contents: read diff --git a/.github/workflows/release-30_publish_release_draft.yml b/.github/workflows/release-30_publish_release_draft.yml index 376f5fbce909..78ceea91f100 100644 --- a/.github/workflows/release-30_publish_release_draft.yml +++ b/.github/workflows/release-30_publish_release_draft.yml @@ -1,19 +1,46 @@ name: Release - Publish draft -on: - push: - tags: - # Catches v1.2.3 and v1.2.3-rc1 - - v[0-9]+.[0-9]+.[0-9]+* - # - polkadot-stable[0-9]+* Activate when the release process from release org is setteled +# This workflow runs in paritytech-release and creates a full release draft with: +# - release notes +# - info about the runtimes +# - attached artifacts: +# - runtimes +# - binaries +# - signatures +on: workflow_dispatch: inputs: - version: - description: Current release/rc version + release_tag: + description: Tag matching the actual release candidate with the format polkadot-stableYYMM(-X)-rcX or polkadot-stableYYMM(-X) + required: true + type: string jobs: + check-synchronization: + uses: paritytech-release/sync-workflows/.github/workflows/check-syncronization.yml@main + + validate-inputs: + needs: [ check-synchronization ] + if: ${{ needs.check-synchronization.outputs.checks_passed == 'true' }} + runs-on: ubuntu-latest + outputs: + release_tag: ${{ steps.validate_inputs.outputs.release_tag }} + + steps: + - name: Checkout sources + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Validate inputs + id: validate_inputs + run: | + . 
./.github/scripts/common/lib.sh + + RELEASE_TAG=$(validate_stable_tag ${{ inputs.release_tag }}) + echo "release_tag=${RELEASE_TAG}" >> $GITHUB_OUTPUT + get-rust-versions: + needs: [ validate-inputs ] runs-on: ubuntu-latest outputs: rustc-stable: ${{ steps.get-rust-versions.outputs.stable }} @@ -24,47 +51,28 @@ jobs: echo "stable=$RUST_STABLE_VERSION" >> $GITHUB_OUTPUT build-runtimes: + needs: [ validate-inputs ] uses: "./.github/workflows/release-srtool.yml" with: excluded_runtimes: "asset-hub-rococo bridge-hub-rococo contracts-rococo coretime-rococo people-rococo rococo rococo-parachain substrate-test bp cumulus-test kitchensink minimal-template parachain-template penpal polkadot-test seedling shell frame-try sp solochain-template polkadot-sdk-docs-first" build_opts: "--features on-chain-release-build" - - build-binaries: - runs-on: ubuntu-latest - strategy: - matrix: - # Tuples of [package, binary-name] - binary: [ [frame-omni-bencher, frame-omni-bencher], [staging-chain-spec-builder, chain-spec-builder], [polkadot-omni-node, polkadot-omni-node] ] - steps: - - name: Checkout sources - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.0.0 - - - name: Install protobuf-compiler - run: | - sudo apt update - sudo apt install -y protobuf-compiler - - - name: Build ${{ matrix.binary[1] }} binary - run: | - cargo build --locked --profile=production -p ${{ matrix.binary[0] }} --bin ${{ matrix.binary[1] }} - target/production/${{ matrix.binary[1] }} --version - - - name: Upload ${{ matrix.binary[1] }} binary - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 - with: - name: ${{ matrix.binary[1] }} - path: target/production/${{ matrix.binary[1] }} - + profile: production + permissions: + id-token: write + attestations: write + contents: read publish-release-draft: runs-on: ubuntu-latest - needs: [ get-rust-versions, build-runtimes ] + environment: release + needs: [ validate-inputs, get-rust-versions, build-runtimes ] outputs: release_url: ${{ steps.create-release.outputs.html_url }} asset_upload_url: ${{ steps.create-release.outputs.upload_url }} + steps: - name: Checkout - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.0.0 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Download artifacts uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 @@ -87,20 +95,21 @@ jobs: GLUTTON_WESTEND_DIGEST: ${{ github.workspace}}/glutton-westend-runtime/glutton-westend-srtool-digest.json PEOPLE_WESTEND_DIGEST: ${{ github.workspace}}/people-westend-runtime/people-westend-srtool-digest.json WESTEND_DIGEST: ${{ github.workspace}}/westend-runtime/westend-srtool-digest.json + RELEASE_TAG: ${{ needs.validate-inputs.outputs.release_tag }} shell: bash run: | . 
./.github/scripts/common/lib.sh export REF1=$(get_latest_release_tag) - if [[ -z "${{ inputs.version }}" ]]; then + if [[ -z "$RELEASE_TAG" ]]; then export REF2="${{ github.ref_name }}" echo "REF2: ${REF2}" else - export REF2="${{ inputs.version }}" + export REF2="$RELEASE_TAG" echo "REF2: ${REF2}" fi echo "REL_TAG=$REF2" >> $GITHUB_ENV - export VERSION=$(echo "$REF2" | sed -E 's/.*(stable[0-9]+).*$/\1/') + export VERSION=$(echo "$REF2" | sed -E 's/.*(stable[0-9]{4}(-[0-9]+)?).*$/\1/') ./scripts/release/build-changelogs.sh @@ -112,19 +121,29 @@ jobs: scripts/release/context.json **/*-srtool-digest.json + - name: Generate content write token for the release automation + id: generate_write_token + uses: actions/create-github-app-token@v1 + with: + app-id: ${{ vars.POLKADOT_SDK_RELEASE_RW_APP_ID }} + private-key: ${{ secrets.POLKADOT_SDK_RELEASE_RW_APP_KEY }} + owner: paritytech + repositories: polkadot-sdk + - name: Create draft release id: create-release - uses: actions/create-release@0cb9c9b65d5d1901c1f53e5e66eaf4afd303e70e # v1.1.4 env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - tag_name: ${{ env.REL_TAG }} - release_name: Polkadot ${{ env.REL_TAG }} - body_path: ${{ github.workspace}}/scripts/release/RELEASE_DRAFT.md - draft: true + GITHUB_TOKEN: ${{ steps.generate_write_token.outputs.token }} + run: | + gh release create ${{ env.REL_TAG }} \ + --repo paritytech/polkadot-sdk \ + --draft \ + --title "Polkadot ${{ env.REL_TAG }}" \ + --notes-file ${{ github.workspace}}/scripts/release/RELEASE_DRAFT.md publish-runtimes: - needs: [ build-runtimes, publish-release-draft ] + needs: [ validate-inputs, build-runtimes, publish-release-draft ] + environment: release continue-on-error: true runs-on: ubuntu-latest strategy: @@ -132,7 +151,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.0.0 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Download artifacts uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 @@ -144,44 +163,83 @@ jobs: >>$GITHUB_ENV echo ASSET=$(find ${{ matrix.chain }}-runtime -name '*.compact.compressed.wasm') >>$GITHUB_ENV echo SPEC=$(<${JSON} jq -r .runtimes.compact.subwasm.core_version.specVersion) + - name: Generate content write token for the release automation + id: generate_write_token + uses: actions/create-github-app-token@v1 + with: + app-id: ${{ vars.POLKADOT_SDK_RELEASE_RW_APP_ID }} + private-key: ${{ secrets.POLKADOT_SDK_RELEASE_RW_APP_KEY }} + owner: paritytech + repositories: polkadot-sdk + - name: Upload compressed ${{ matrix.chain }} v${{ env.SPEC }} wasm - if: ${{ matrix.chain != 'rococo-parachain' }} - uses: actions/upload-release-asset@e8f9f06c4b078e705bd2ea027f0926603fc9b4d5 #v1.0.2 env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ needs.publish-release-draft.outputs.asset_upload_url }} - asset_path: ${{ env.ASSET }} - asset_name: ${{ matrix.chain }}_runtime-v${{ env.SPEC }}.compact.compressed.wasm - asset_content_type: application/wasm + GITHUB_TOKEN: ${{ steps.generate_write_token.outputs.token }} + run: | + gh release upload ${{ needs.validate-inputs.outputs.release_tag }} \ + --repo paritytech/polkadot-sdk \ + '${{ env.ASSET }}#${{ matrix.chain }}_runtime-v${{ env.SPEC }}.compact.compressed.wasm' - publish-binaries: - needs: [ publish-release-draft, build-binaries ] + publish-release-artifacts: + needs: [ validate-inputs, publish-release-draft ] + environment: release continue-on-error: true 
runs-on: ubuntu-latest strategy: matrix: - binary: [frame-omni-bencher, chain-spec-builder, polkadot-omni-node] + binary: [ polkadot, polkadot-execute-worker, polkadot-prepare-worker, polkadot-parachain, polkadot-omni-node, frame-omni-bencher, chain-spec-builder ] + target: [ x86_64-unknown-linux-gnu, aarch64-apple-darwin ] steps: - - name: Download artifacts - uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + - name: Checkout sources + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Fetch binaries from s3 based on version + run: | + . ./.github/scripts/common/lib.sh + + VERSION="${{ needs.validate-inputs.outputs.release_tag }}" + fetch_release_artifacts_from_s3 ${{ matrix.binary }} ${{ matrix.target }} + + - name: Rename aarch64-apple-darwin binaries + if: ${{ matrix.target == 'aarch64-apple-darwin' }} + working-directory: ${{ github.workspace}}/release-artifacts/${{ matrix.target }}/${{ matrix.binary }} + run: | + mv ${{ matrix.binary }} ${{ matrix.binary }}-aarch64-apple-darwin + mv ${{ matrix.binary }}.asc ${{ matrix.binary }}-aarch64-apple-darwin.asc + mv ${{ matrix.binary }}.sha256 ${{ matrix.binary }}-aarch64-apple-darwin.sha256 + + - name: Generate content write token for the release automation + id: generate_write_token + uses: actions/create-github-app-token@v1 with: - name: ${{ matrix.binary }} + app-id: ${{ vars.POLKADOT_SDK_RELEASE_RW_APP_ID }} + private-key: ${{ secrets.POLKADOT_SDK_RELEASE_RW_APP_KEY }} + owner: paritytech + repositories: polkadot-sdk - - name: Upload ${{ matrix.binary }} binary - uses: actions/upload-release-asset@e8f9f06c4b078e705bd2ea027f0926603fc9b4d5 #v1.0.2 + - name: Upload ${{ matrix.binary }} binary to release draft env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ needs.publish-release-draft.outputs.asset_upload_url }} - asset_path: ${{ github.workspace}}/${{ matrix.binary }} - asset_name: ${{ matrix.binary }} - asset_content_type: application/octet-stream + GITHUB_TOKEN: ${{ steps.generate_write_token.outputs.token }} + working-directory: ${{ github.workspace}}/release-artifacts/${{ matrix.target }}/${{ matrix.binary }} + run: | + if [[ ${{ matrix.target }} == "aarch64-apple-darwin" ]]; then + gh release upload ${{ needs.validate-inputs.outputs.release_tag }} \ + --repo paritytech/polkadot-sdk \ + ${{ matrix.binary }}-aarch64-apple-darwin \ + ${{ matrix.binary }}-aarch64-apple-darwin.asc \ + ${{ matrix.binary }}-aarch64-apple-darwin.sha256 + else + gh release upload ${{ needs.validate-inputs.outputs.release_tag }} \ + --repo paritytech/polkadot-sdk \ + ${{ matrix.binary }} \ + ${{ matrix.binary }}.asc \ + ${{ matrix.binary }}.sha256 + fi post_to_matrix: runs-on: ubuntu-latest - needs: publish-release-draft + needs: [ validate-inputs, publish-release-draft ] environment: release strategy: matrix: @@ -197,5 +255,5 @@ jobs: access_token: ${{ secrets.RELEASENOTES_MATRIX_V2_ACCESS_TOKEN }} server: m.parity.io message: | - **New version of polkadot tagged**: ${{ github.ref_name }}
- Draft release created: ${{ needs.publish-release-draft.outputs.release_url }} + **New version of polkadot tagged**: ${{ needs.validate-inputs.outputs.release_tag }}
+ Draft release created in the [polkadot-sdk repo](https://github.com/paritytech/polkadot-sdk/releases) diff --git a/.github/workflows/release-31_promote-rc-to-final.yml b/.github/workflows/release-31_promote-rc-to-final.yml new file mode 100644 index 000000000000..6aa9d4bddd1d --- /dev/null +++ b/.github/workflows/release-31_promote-rc-to-final.yml @@ -0,0 +1,125 @@ +name: Release - Promote RC to final candidate on S3 + +on: + workflow_dispatch: + inputs: + binary: + description: Binary to be built for the release + default: all + type: choice + options: + - polkadot + - polkadot-parachain + - polkadot-omni-node + - frame-omni-bencher + - chain-spec-builder + - all + release_tag: + description: Tag matching the actual release candidate with the format polkadot-stableYYMM(-X)-rcX + type: string + + +jobs: + + check-synchronization: + uses: paritytech-release/sync-workflows/.github/workflows/check-syncronization.yml@main + + validate-inputs: + needs: [ check-synchronization ] + if: ${{ needs.check-synchronization.outputs.checks_passed }} == 'true' + runs-on: ubuntu-latest + outputs: + release_tag: ${{ steps.validate_inputs.outputs.release_tag }} + final_tag: ${{ steps.validate_inputs.outputs.final_tag }} + + steps: + - name: Checkout sources + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Validate inputs + id: validate_inputs + run: | + . ./.github/scripts/common/lib.sh + + RELEASE_TAG=$(validate_stable_tag ${{ inputs.release_tag }}) + echo "release_tag=${RELEASE_TAG}" >> $GITHUB_OUTPUT + + promote-polkadot-rc-to-final: + if: ${{ inputs.binary == 'polkadot' || inputs.binary == 'all' }} + needs: [ validate-inputs ] + uses: ./.github/workflows/release-reusable-promote-to-final.yml + strategy: + matrix: + target: [ x86_64-unknown-linux-gnu, aarch64-apple-darwin ] + with: + package: polkadot + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: ${{ matrix.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + promote-polkadot-parachain-rc-to-final: + if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'all' }} + needs: [ validate-inputs ] + uses: ./.github/workflows/release-reusable-promote-to-final.yml + strategy: + matrix: + target: [ x86_64-unknown-linux-gnu, aarch64-apple-darwin ] + with: + package: polkadot-parachain + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: ${{ matrix.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + promote-polkadot-omni-node-rc-to-final: + if: ${{ inputs.binary == 'polkadot-omni-node' || inputs.binary == 'all' }} + needs: [ validate-inputs ] + uses: ./.github/workflows/release-reusable-promote-to-final.yml + strategy: + matrix: + target: [ x86_64-unknown-linux-gnu, aarch64-apple-darwin ] + with: + package: polkadot-omni-node + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: ${{ matrix.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + promote-frame-omni-bencher-rc-to-final: + if: ${{ inputs.binary == 
'frame-omni-bencher' || inputs.binary == 'all' }} + needs: [ validate-inputs ] + uses: ./.github/workflows/release-reusable-promote-to-final.yml + strategy: + matrix: + target: [ x86_64-unknown-linux-gnu, aarch64-apple-darwin ] + with: + package: frame-omni-bencher + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: ${{ matrix.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + promote-chain-spec-builder-rc-to-final: + if: ${{ inputs.binary == 'chain-spec-builder' || inputs.binary == 'all' }} + needs: [ validate-inputs ] + uses: ./.github/workflows/release-reusable-promote-to-final.yml + strategy: + matrix: + target: [ x86_64-unknown-linux-gnu, aarch64-apple-darwin ] + with: + package: chain-spec-builder + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: ${{ matrix.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} diff --git a/.github/workflows/release-40_publish-deb-package.yml b/.github/workflows/release-40_publish-deb-package.yml new file mode 100644 index 000000000000..3c5411ab16f0 --- /dev/null +++ b/.github/workflows/release-40_publish-deb-package.yml @@ -0,0 +1,152 @@ +name: Release - Publish polkadot deb package + +on: + workflow_dispatch: + inputs: + tag: + description: Current final release tag in the format polkadot-stableYYMM or polkadot-stableYYMM-X + default: polkadot-stable2412 + required: true + type: string + + distribution: + description: Distribution to publish the deb package to (release, staging, stable2407, etc) + default: staging + required: true + type: string + +jobs: + check-synchronization: + uses: paritytech-release/sync-workflows/.github/workflows/check-syncronization.yml@main + + validate-inputs: + needs: [check-synchronization] + if: ${{ needs.check-synchronization.outputs.checks_passed }} == 'true' + runs-on: ubuntu-latest + outputs: + release_tag: ${{ steps.validate_inputs.outputs.release_tag }} + + steps: + - name: Checkout sources + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + + - name: Validate inputs + id: validate_inputs + run: | + . ./.github/scripts/common/lib.sh + + RELEASE_TAG=$(validate_stable_tag ${{ inputs.tag }}) + echo "release_tag=${RELEASE_TAG}" >> $GITHUB_OUTPUT + + + fetch-artifacts-from-s3: + runs-on: ubuntu-latest + needs: [validate-inputs] + env: + REPO: ${{ github.repository }} + RELEASE_TAG: ${{ needs.validate-inputs.outputs.release_tag }} + outputs: + VERSION: ${{ steps.fetch_artifacts_from_s3.outputs.VERSION }} + + steps: + - name: Checkout sources + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + + - name: Fetch rc artifacts or release artifacts from s3 based on version + id: fetch_artifacts_from_s3 + run: | + . 
./.github/scripts/common/lib.sh + + VERSION="$(get_polkadot_node_version_from_code)" + echo "VERSION=${VERSION}" >> $GITHUB_OUTPUT + + fetch_debian_package_from_s3 polkadot + + - name: Upload artifacts + uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 + with: + name: release-artifacts + path: release-artifacts/polkadot/*.deb + + publish-deb-package: + runs-on: ubuntu-latest + needs: [fetch-artifacts-from-s3] + environment: release + env: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_DEB_PATH: "s3://releases-package-repos/deb" + LOCAL_DEB_REPO_PATH: ${{ github.workspace }}/deb + VERSION: ${{ needs.fetch-artifacts-from-s3.outputs.VERSION }} + + steps: + - name: Install pgpkms + run: | + # Install pgpkms that is used to sign built artifacts + python3 -m pip install "pgpkms @ git+https://github.com/paritytech-release/pgpkms.git@1f8555426662ac93a3849480a35449f683b1c89f" + echo "PGPKMS_REPREPRO_PATH=$(which pgpkms-reprepro)" >> $GITHUB_ENV + + - name: Install awscli + run: | + python3 -m pip install awscli + which aws + + - name: Checkout sources + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + + - name: Import gpg keys + shell: bash + run: | + . ./.github/scripts/common/lib.sh + + import_gpg_keys + + - name: Download artifacts + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + with: + name: release-artifacts + path: release-artifacts + + - name: Setup local deb repo + run: | + sudo apt-get install -y reprepro + which reprepro + + sed -i "s|^SignWith:.*|SignWith: ! ${PGPKMS_REPREPRO_PATH}|" ${{ github.workspace }}/.github/scripts/release/distributions + + mkdir -p ${{ github.workspace }}/deb/conf + cp ${{ github.workspace }}/.github/scripts/release/distributions ${{ github.workspace }}/deb/conf/distributions + cat ${{ github.workspace }}/deb/conf/distributions + + - name: Sync local deb repo + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + run: | + # Download the current state of the deb repo + aws s3 sync "$AWS_DEB_PATH/db" "$LOCAL_DEB_REPO_PATH/db" + aws s3 sync "$AWS_DEB_PATH/pool" "$LOCAL_DEB_REPO_PATH/pool" + aws s3 sync "$AWS_DEB_PATH/dists" "$LOCAL_DEB_REPO_PATH/dists" + + - name: Add deb package to local repo + env: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + run: | + # Add the new deb to the repo + reprepro -b "$LOCAL_DEB_REPO_PATH" includedeb "${{ inputs.distribution }}" "release-artifacts/polkadot_${VERSION}_amd64.deb" + + - name: Upload updated deb repo + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + run: | + # Upload the updated repo - dists and pool should be publicly readable + aws s3 sync "$LOCAL_DEB_REPO_PATH/pool" "$AWS_DEB_PATH/pool" --acl public-read + aws s3 sync "$LOCAL_DEB_REPO_PATH/dists" "$AWS_DEB_PATH/dists" --acl public-read + aws s3 sync "$LOCAL_DEB_REPO_PATH/db" "$AWS_DEB_PATH/db" + aws s3 sync "$LOCAL_DEB_REPO_PATH/conf" "$AWS_DEB_PATH/conf" + + # Invalidate caches to make sure latest files are served + aws cloudfront create-invalidation --distribution-id E36FKEYWDXAZYJ --paths '/deb/*' diff --git a/.github/workflows/release-50_publish-docker.yml b/.github/workflows/release-50_publish-docker.yml index 
627e53bacd88..5c3c3a6e854d 100644 --- a/.github/workflows/release-50_publish-docker.yml +++ b/.github/workflows/release-50_publish-docker.yml @@ -4,10 +4,6 @@ name: Release - Publish Docker Image # It builds and published releases and rc candidates. on: - #TODO: activate automated run later - # release: - # types: - # - published workflow_dispatch: inputs: image_type: @@ -30,16 +26,6 @@ on: - polkadot-parachain - chain-spec-builder - release_id: - description: | - Release ID. - You can find it using the command: - curl -s \ - -H "Authorization: Bearer ${GITHUB_TOKEN}" https://api.github.com/repos/$OWNER/$REPO/releases | \ - jq '.[] | { name: .name, id: .id }' - required: true - type: number - registry: description: Container registry required: true @@ -55,7 +41,7 @@ on: default: parity version: - description: version to build/release + description: Version of the polkadot node release in format v1.16.0 or v1.16.0-rc1 default: v0.9.18 required: true @@ -78,11 +64,15 @@ env: IMAGE_TYPE: ${{ inputs.image_type }} jobs: + check-synchronization: + uses: paritytech-release/sync-workflows/.github/workflows/check-syncronization.yml@main + validate-inputs: + needs: [check-synchronization] + if: ${{ needs.check-synchronization.outputs.checks_passed }} == 'true' runs-on: ubuntu-latest outputs: version: ${{ steps.validate_inputs.outputs.VERSION }} - release_id: ${{ steps.validate_inputs.outputs.RELEASE_ID }} stable_tag: ${{ steps.validate_inputs.outputs.stable_tag }} steps: @@ -97,11 +87,6 @@ jobs: VERSION=$(filter_version_from_input "${{ inputs.version }}") echo "VERSION=${VERSION}" >> $GITHUB_OUTPUT - RELEASE_ID=$(check_release_id "${{ inputs.release_id }}") - echo "RELEASE_ID=${RELEASE_ID}" >> $GITHUB_OUTPUT - - echo "Release ID: $RELEASE_ID" - STABLE_TAG=$(validate_stable_tag ${{ inputs.stable_tag }}) echo "stable_tag=${STABLE_TAG}" >> $GITHUB_OUTPUT @@ -114,50 +99,26 @@ jobs: - name: Checkout sources uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - #TODO: this step will be needed when automated triggering will work - #this step runs only if the workflow is triggered automatically when new release is published - # if: ${{ env.EVENT_NAME == 'release' && env.EVENT_ACTION != '' && env.EVENT_ACTION == 'published' }} - # run: | - # mkdir -p release-artifacts && cd release-artifacts - - # for f in $BINARY $BINARY.asc $BINARY.sha256; do - # URL="https://github.com/${{ github.event.repository.full_name }}/releases/download/${{ github.event.release.tag_name }}/$f" - # echo " - Fetching $f from $URL" - # wget "$URL" -O "$f" - # done - # chmod a+x $BINARY - # ls -al - - name: Fetch rc artifacts or release artifacts from s3 based on version - #this step runs only if the workflow is triggered manually - if: ${{ env.EVENT_NAME == 'workflow_dispatch' && inputs.binary != 'polkadot-omni-node' && inputs.binary != 'chain-spec-builder'}} + # if: ${{ env.EVENT_NAME == 'workflow_dispatch' && inputs.binary != 'polkadot-omni-node' && inputs.binary != 'chain-spec-builder'}} run: | . 
./.github/scripts/common/lib.sh - VERSION="${{ needs.validate-inputs.outputs.VERSION }}" + VERSION="${{ needs.validate-inputs.outputs.stable_tag }}" if [[ ${{ inputs.binary }} == 'polkadot' ]]; then bins=(polkadot polkadot-prepare-worker polkadot-execute-worker) for bin in "${bins[@]}"; do - fetch_release_artifacts_from_s3 $bin + fetch_release_artifacts_from_s3 $bin x86_64-unknown-linux-gnu done else - fetch_release_artifacts_from_s3 $BINARY + fetch_release_artifacts_from_s3 $BINARY x86_64-unknown-linux-gnu fi - - name: Fetch polkadot-omni-node/chain-spec-builder rc artifacts or release artifacts based on release id - #this step runs only if the workflow is triggered manually and only for chain-spec-builder - if: ${{ env.EVENT_NAME == 'workflow_dispatch' && (inputs.binary == 'polkadot-omni-node' || inputs.binary == 'chain-spec-builder') }} - run: | - . ./.github/scripts/common/lib.sh - - RELEASE_ID="${{ needs.validate-inputs.outputs.RELEASE_ID }}" - fetch_release_artifacts - - name: Upload artifacts uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: name: release-artifacts - path: release-artifacts/${{ env.BINARY }}/**/* + path: release-artifacts/x86_64-unknown-linux-gnu/${{ env.BINARY }}/**/* build-container: # this job will be triggered for the polkadot-parachain rc and release or polkadot rc image build if: ${{ inputs.binary == 'polkadot-omni-node' || inputs.binary == 'polkadot-parachain' || inputs.binary == 'chain-spec-builder' || inputs.image_type == 'rc' }} @@ -173,7 +134,7 @@ jobs: uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 - name: Check sha256 ${{ env.BINARY }} - if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'polkadot' }} + # if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'polkadot' }} working-directory: release-artifacts run: | . ../.github/scripts/common/lib.sh @@ -182,7 +143,7 @@ jobs: check_sha256 $BINARY && echo "OK" || echo "ERR" - name: Check GPG ${{ env.BINARY }} - if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'polkadot' }} + # if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'polkadot' }} working-directory: release-artifacts run: | . ../.github/scripts/common/lib.sh @@ -190,35 +151,29 @@ jobs: check_gpg $BINARY - name: Fetch rc commit and tag + working-directory: release-artifacts if: ${{ env.IMAGE_TYPE == 'rc' }} id: fetch_rc_refs + shell: bash run: | - . ./.github/scripts/common/lib.sh - - echo "release=${{ needs.validate-inputs.outputs.stable_tag }}" >> $GITHUB_OUTPUT + . ../.github/scripts/common/lib.sh commit=$(git rev-parse --short HEAD) && \ echo "commit=${commit}" >> $GITHUB_OUTPUT - - echo "tag=${{ needs.validate-inputs.outputs.version }}" >> $GITHUB_OUTPUT + echo "release=$(echo ${{ needs.validate-inputs.outputs.version }})" >> $GITHUB_OUTPUT + echo "tag=$(prepare_docker_stable_tag ${{ needs.validate-inputs.outputs.stable_tag }})" >> $GITHUB_OUTPUT - name: Fetch release tags working-directory: release-artifacts if: ${{ env.IMAGE_TYPE == 'release'}} id: fetch_release_refs + shell: bash run: | - chmod a+rx $BINARY - - if [[ $BINARY != 'chain-spec-builder' ]]; then - VERSION=$(./$BINARY --version | awk '{ print $2 }' ) - release=$( echo $VERSION | cut -f1 -d- ) - else - release=$(echo ${{ needs.validate-inputs.outputs.VERSION }} | sed 's/^v//') - fi + . 
../.github/scripts/common/lib.sh echo "tag=latest" >> $GITHUB_OUTPUT - echo "release=${release}" >> $GITHUB_OUTPUT - echo "stable=${{ needs.validate-inputs.outputs.stable_tag }}" >> $GITHUB_OUTPUT + echo "release=$(echo ${{ needs.validate-inputs.outputs.version }})" >> $GITHUB_OUTPUT + echo "stable=$(prepare_docker_stable_tag ${{ needs.validate-inputs.outputs.stable_tag }})" >> $GITHUB_OUTPUT - name: Build Injected Container image for polkadot rc if: ${{ env.BINARY == 'polkadot' }} @@ -342,8 +297,10 @@ jobs: - name: Fetch values id: fetch-data run: | + . ./.github/scripts/common/lib.sh date=$(date -u '+%Y-%m-%dT%H:%M:%SZ') echo "date=$date" >> $GITHUB_OUTPUT + echo "stable=$(prepare_docker_stable_tag ${{ needs.validate-inputs.outputs.stable_tag }})" >> $GITHUB_OUTPUT - name: Build and push id: docker_build @@ -354,9 +311,9 @@ jobs: # TODO: The owner should be used below but buildx does not resolve the VARs # TODO: It would be good to get rid of this GHA that we don't really need. tags: | - parity/polkadot:${{ needs.validate-inputs.outputs.stable_tag }} - parity/polkadot:latest - parity/polkadot:${{ needs.fetch-latest-debian-package-version.outputs.polkadot_container_tag }} + egorpop/polkadot:${{ steps.fetch-data.outputs.stable }} + egorpop/polkadot:latest + egorpop/polkadot:${{ needs.fetch-latest-debian-package-version.outputs.polkadot_container_tag }} build-args: | VCS_REF=${{ github.ref }} POLKADOT_VERSION=${{ needs.fetch-latest-debian-package-version.outputs.polkadot_apt_version }} diff --git a/.github/workflows/release-build-rc.yml b/.github/workflows/release-build-rc.yml deleted file mode 100644 index 94bacf320898..000000000000 --- a/.github/workflows/release-build-rc.yml +++ /dev/null @@ -1,82 +0,0 @@ -name: Release - Build node release candidate - -on: - workflow_dispatch: - inputs: - binary: - description: Binary to be build for the release - default: all - type: choice - options: - - polkadot - - polkadot-parachain - - all - - release_tag: - description: Tag matching the actual release candidate with the format stableYYMM-rcX or stableYYMM - type: string - -jobs: - check-synchronization: - uses: paritytech-release/sync-workflows/.github/workflows/check-syncronization.yml@main - - validate-inputs: - needs: [check-synchronization] - if: ${{ needs.check-synchronization.outputs.checks_passed }} == 'true' - runs-on: ubuntu-latest - outputs: - release_tag: ${{ steps.validate_inputs.outputs.release_tag }} - - steps: - - name: Checkout sources - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - - - name: Validate inputs - id: validate_inputs - run: | - . 
./.github/scripts/common/lib.sh - - RELEASE_TAG=$(validate_stable_tag ${{ inputs.release_tag }}) - echo "release_tag=${RELEASE_TAG}" >> $GITHUB_OUTPUT - - build-polkadot-binary: - needs: [validate-inputs] - if: ${{ inputs.binary == 'polkadot' || inputs.binary == 'all' }} - uses: "./.github/workflows/release-reusable-rc-buid.yml" - with: - binary: '["polkadot", "polkadot-prepare-worker", "polkadot-execute-worker"]' - package: polkadot - release_tag: ${{ needs.validate-inputs.outputs.release_tag }} - secrets: - PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} - PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} - AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} - AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} - permissions: - id-token: write - attestations: write - contents: read - - build-polkadot-parachain-binary: - needs: [validate-inputs] - if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'all' }} - uses: "./.github/workflows/release-reusable-rc-buid.yml" - with: - binary: '["polkadot-parachain"]' - package: "polkadot-parachain-bin" - release_tag: ${{ needs.validate-inputs.outputs.release_tag }} - secrets: - PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} - PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} - AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} - AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} - permissions: - id-token: write - attestations: write - contents: read diff --git a/.github/workflows/release-reusable-promote-to-final.yml b/.github/workflows/release-reusable-promote-to-final.yml new file mode 100644 index 000000000000..ed4a80a01e82 --- /dev/null +++ b/.github/workflows/release-reusable-promote-to-final.yml @@ -0,0 +1,83 @@ +name: Promote rc to final + +on: + workflow_call: + inputs: + package: + description: Package to be promoted + required: true + type: string + + release_tag: + description: Tag matching the actual release candidate with the format polkadot-stableYYMM(-X)-rcX that will be changed to final in the form polkadot-stableYYMM(-X) + required: true + type: string + + target: + description: Target triple for which the artifacts are being uploaded (e.g. aarch64-apple-darwin) + required: true + type: string + + secrets: + AWS_DEFAULT_REGION: + required: true + AWS_RELEASE_ACCESS_KEY_ID: + required: true + AWS_RELEASE_SECRET_ACCESS_KEY: + required: true + +jobs: + + promote-release-artifacts: + environment: release + runs-on: ubuntu-latest + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + AWS_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + + steps: + - name: Checkout sources + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Prepare final tag + id: prepare_final_tag + shell: bash + run: | + tag="$(echo ${{ inputs.release_tag }} | sed 's/-rc[0-9]*$//')" + echo $tag + echo "FINAL_TAG=${tag}" >> $GITHUB_OUTPUT + + - name: Fetch binaries from s3 based on version + run: | + . 
./.github/scripts/common/lib.sh + + VERSION="${{ inputs.release_tag }}" + if [[ ${{ inputs.package }} == 'polkadot' ]]; then + packages=(polkadot polkadot-prepare-worker polkadot-execute-worker) + for package in "${packages[@]}"; do + fetch_release_artifacts_from_s3 $package ${{ inputs.target }} + done + else + fetch_release_artifacts_from_s3 ${{ inputs.package }} ${{ inputs.target }} + fi + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 + with: + aws-access-key-id: ${{ env.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ env.AWS_SECRET_ACCESS_KEY }} + aws-region: ${{ env.AWS_REGION }} + + - name: Upload ${{ inputs.package }} ${{ inputs.target }} artifacts to s3 + run: | + . ./.github/scripts/release/release_lib.sh + + if [[ ${{ inputs.package }} == 'polkadot' ]]; then + packages=(polkadot polkadot-prepare-worker polkadot-execute-worker) + for package in "${packages[@]}"; do + upload_s3_release $package ${{ steps.prepare_final_tag.outputs.final_tag }} ${{ inputs.target }} + done + else + upload_s3_release ${{ inputs.package }} ${{ steps.prepare_final_tag.outputs.final_tag }} ${{ inputs.target }} + fi diff --git a/.github/workflows/release-reusable-rc-buid.yml b/.github/workflows/release-reusable-rc-buid.yml index d925839fb84a..0222b2aa91e2 100644 --- a/.github/workflows/release-reusable-rc-buid.yml +++ b/.github/workflows/release-reusable-rc-buid.yml @@ -10,7 +10,7 @@ on: type: string package: - description: Package to be built, for now is either polkadot or polkadot-parachain-bin + description: Package to be built, for now can be polkadot, polkadot-parachain-bin, or polkadot-omni-node required: true type: string @@ -19,6 +19,11 @@ on: required: true type: string + target: + description: Target triple for which the artifacts are being built (e.g. 
x86_64-unknown-linux-gnu) + required: true + type: string + secrets: PGP_KMS_KEY: required: true @@ -57,6 +62,7 @@ jobs: run: cat .github/env >> $GITHUB_OUTPUT build-rc: + if: ${{ inputs.target == 'x86_64-unknown-linux-gnu' }} needs: [set-image] runs-on: ubuntu-latest-m environment: release @@ -98,7 +104,7 @@ jobs: ./.github/scripts/release/build-linux-release.sh ${{ matrix.binaries }} ${{ inputs.package }} - name: Generate artifact attestation - uses: actions/attest-build-provenance@1c608d11d69870c2092266b3f9a6f3abbf17002c # v1.4.3 + uses: actions/attest-build-provenance@ef244123eb79f2f7a7e75d99086184180e6d0018 # v1.4.4 with: subject-path: /artifacts/${{ matrix.binaries }}/${{ matrix.binaries }} @@ -127,11 +133,127 @@ jobs: - name: Upload ${{ matrix.binaries }} artifacts uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: - name: ${{ matrix.binaries }} + name: ${{ matrix.binaries }}_${{ inputs.target }} path: /artifacts/${{ matrix.binaries }} + build-macos-rc: + if: ${{ inputs.target == 'aarch64-apple-darwin' }} + runs-on: parity-macos + environment: release + strategy: + matrix: + binaries: ${{ fromJSON(inputs.binary) }} + env: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + SKIP_WASM_BUILD: 1 + steps: + - name: Checkout sources + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + with: + ref: ${{ inputs.release_tag }} + fetch-depth: 0 + + - name: Set rust version from env file + run: | + RUST_VERSION=$(cat .github/env | sed -E 's/.*ci-unified:([^-]+)-([^-]+).*/\2/') + echo $RUST_VERSION + echo "RUST_VERSION=${RUST_VERSION}" >> $GITHUB_ENV + - name: Set workspace environment variable + # relevant for artifacts upload, which cannot interpolate GitHub Actions variable syntax when + # used within valid paths. We cannot use root-based paths either, since it is set as read-only + # on the `parity-macos` runner. + run: echo "ARTIFACTS_PATH=${GITHUB_WORKSPACE}/artifacts/${{ matrix.binaries }}" >> $GITHUB_ENV + + - name: Set up Homebrew + uses: Homebrew/actions/setup-homebrew@1ccc07ccd54b6048295516a3eb89b192c35057dc # master from 12.09.2024 + - name: Set homebrew binaries location on path + run: echo "/opt/homebrew/bin" >> $GITHUB_PATH + + - name: Install rust ${{ env.RUST_VERSION }} + uses: actions-rust-lang/setup-rust-toolchain@11df97af8e8102fd60b60a77dfbf58d40cd843b8 # v1.10.1 + with: + cache: false + toolchain: ${{ env.RUST_VERSION }} + target: wasm32-unknown-unknown + components: cargo, clippy, rust-docs, rust-src, rustfmt, rustc, rust-std + + - name: cargo info + run: | + echo "######## rustup show ########" + rustup show + echo "######## cargo --version ########" + cargo --version + + - name: Install protobuf + run: brew install protobuf + - name: Install gpg + run: | + brew install gnupg + # Setup for being able to resolve: keyserver.ubuntu.com. 
+ # See: https://github.com/actions/runner-images/issues/9777 + mkdir -p ~/.gnupg/ + touch ~/.gnupg/dirmngr.conf + echo "standard-resolver" > ~/.gnupg/dirmngr.conf + - name: Install sha256sum + run: | + brew install coreutils + + - name: Install pgpkms + run: | + # Install pgpkms that is used to sign built artifacts + python3 -m pip install "pgpkms @ git+https://github.com/paritytech-release/pgpkms.git@5a8f82fbb607ea102d8c178e761659de54c7af69" --break-system-packages + + - name: Import gpg keys + shell: bash + run: | + . ./.github/scripts/common/lib.sh + + import_gpg_keys + + - name: Build binary + run: | + git config --global --add safe.directory "${GITHUB_WORKSPACE}" # avoid "detected dubious ownership" error + ./.github/scripts/release/build-macos-release.sh ${{ matrix.binaries }} ${{ inputs.package }} + + - name: Generate artifact attestation + uses: actions/attest-build-provenance@ef244123eb79f2f7a7e75d99086184180e6d0018 # v1.4.4 + with: + subject-path: ${{ env.ARTIFACTS_PATH }}/${{ matrix.binaries }} + + - name: Sign artifacts + working-directory: ${{ env.ARTIFACTS_PATH }} + run: | + python3 -m pgpkms sign --input ${{ matrix.binaries }} -o ${{ matrix.binaries }}.asc + + - name: Check sha256 ${{ matrix.binaries }} + working-directory: ${{ env.ARTIFACTS_PATH }} + shell: bash + run: | + . "${GITHUB_WORKSPACE}"/.github/scripts/common/lib.sh + + echo "Checking binary ${{ matrix.binaries }}" + check_sha256 ${{ matrix.binaries }} + + - name: Check GPG ${{ matrix.binaries }} + working-directory: ${{ env.ARTIFACTS_PATH }} + shell: bash + run: | + . "${GITHUB_WORKSPACE}"/.github/scripts/common/lib.sh + + check_gpg ${{ matrix.binaries }} + + - name: Upload ${{ matrix.binaries }} artifacts + uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 + with: + name: ${{ matrix.binaries }}_${{ inputs.target }} + path: ${{ env.ARTIFACTS_PATH }} + build-polkadot-deb-package: - if: ${{ inputs.package == 'polkadot' }} + if: ${{ inputs.package == 'polkadot' && inputs.target == 'x86_64-unknown-linux-gnu' }} needs: [build-rc] runs-on: ubuntu-latest @@ -156,37 +278,170 @@ jobs: . 
"${GITHUB_WORKSPACE}"/.github/scripts/release/build-deb.sh ${{ inputs.package }} ${VERSION} - name: Generate artifact attestation - uses: actions/attest-build-provenance@1c608d11d69870c2092266b3f9a6f3abbf17002c # v1.4.3 + uses: actions/attest-build-provenance@ef244123eb79f2f7a7e75d99086184180e6d0018 # v1.4.4 with: subject-path: target/production/*.deb - name: Upload ${{inputs.package }} artifacts uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: - name: ${{ inputs.package }} + name: ${{ inputs.package }}_${{ inputs.target }} path: target/production overwrite: true upload-polkadot-artifacts-to-s3: - if: ${{ inputs.package == 'polkadot' }} + if: ${{ inputs.package == 'polkadot' && inputs.target == 'x86_64-unknown-linux-gnu' }} needs: [build-polkadot-deb-package] uses: ./.github/workflows/release-reusable-s3-upload.yml with: package: ${{ inputs.package }} release_tag: ${{ inputs.release_tag }} + target: ${{ inputs.target }} secrets: AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} - upload-polkadot-parachain-artifacts-to-s3: - if: ${{ inputs.package == 'polkadot-parachain-bin' }} + if: ${{ inputs.package == 'polkadot-parachain-bin' && inputs.target == 'x86_64-unknown-linux-gnu' }} needs: [build-rc] uses: ./.github/workflows/release-reusable-s3-upload.yml with: package: polkadot-parachain release_tag: ${{ inputs.release_tag }} + target: ${{ inputs.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + upload-polkadot-omni-node-artifacts-to-s3: + if: ${{ inputs.package == 'polkadot-omni-node' && inputs.target == 'x86_64-unknown-linux-gnu' }} + needs: [build-rc] + uses: ./.github/workflows/release-reusable-s3-upload.yml + with: + package: ${{ inputs.package }} + release_tag: ${{ inputs.release_tag }} + target: ${{ inputs.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + upload-frame-omni-bencher-artifacts-to-s3: + if: ${{ inputs.package == 'frame-omni-bencher' && inputs.target == 'x86_64-unknown-linux-gnu' }} + needs: [build-rc] + uses: ./.github/workflows/release-reusable-s3-upload.yml + with: + package: ${{ inputs.package }} + release_tag: ${{ inputs.release_tag }} + target: ${{ inputs.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + upload-chain-spec-builder-artifacts-to-s3: + if: ${{ inputs.package == 'staging-chain-spec-builder' && inputs.target == 'x86_64-unknown-linux-gnu' }} + needs: [build-rc] + uses: ./.github/workflows/release-reusable-s3-upload.yml + with: + package: chain-spec-builder + release_tag: ${{ inputs.release_tag }} + target: ${{ inputs.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + upload-polkadot-macos-artifacts-to-s3: + if: ${{ inputs.package == 'polkadot' && inputs.target == 
'aarch64-apple-darwin' }} + # TODO: add and use a `build-polkadot-homebrew-package` which packs all `polkadot` binaries: + # `polkadot`, `polkadot-prepare-worker` and `polkadot-execute-worker`. + needs: [build-macos-rc] + uses: ./.github/workflows/release-reusable-s3-upload.yml + with: + package: ${{ inputs.package }} + release_tag: ${{ inputs.release_tag }} + target: ${{ inputs.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + upload-polkadot-prepare-worker-macos-artifacts-to-s3: + if: ${{ inputs.package == 'polkadot' && inputs.target == 'aarch64-apple-darwin' }} + needs: [build-macos-rc] + uses: ./.github/workflows/release-reusable-s3-upload.yml + with: + package: polkadot-prepare-worker + release_tag: ${{ inputs.release_tag }} + target: ${{ inputs.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + upload-polkadot-execute-worker-macos-artifacts-to-s3: + if: ${{ inputs.package == 'polkadot' && inputs.target == 'aarch64-apple-darwin' }} + needs: [build-macos-rc] + uses: ./.github/workflows/release-reusable-s3-upload.yml + with: + package: polkadot-execute-worker + release_tag: ${{ inputs.release_tag }} + target: ${{ inputs.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + upload-polkadot-omni-node-macos-artifacts-to-s3: + if: ${{ inputs.package == 'polkadot-omni-node' && inputs.target == 'aarch64-apple-darwin' }} + needs: [build-macos-rc] + uses: ./.github/workflows/release-reusable-s3-upload.yml + with: + package: ${{ inputs.package }} + release_tag: ${{ inputs.release_tag }} + target: ${{ inputs.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + upload-polkadot-parachain-macos-artifacts-to-s3: + if: ${{ inputs.package == 'polkadot-parachain-bin' && inputs.target == 'aarch64-apple-darwin' }} + needs: [build-macos-rc] + uses: ./.github/workflows/release-reusable-s3-upload.yml + with: + package: polkadot-parachain + release_tag: ${{ inputs.release_tag }} + target: ${{ inputs.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + upload-frame-omni-bencher-macos-artifacts-to-s3: + if: ${{ inputs.package == 'frame-omni-bencher' && inputs.target == 'aarch64-apple-darwin' }} + needs: [build-macos-rc] + uses: ./.github/workflows/release-reusable-s3-upload.yml + with: + package: ${{ inputs.package }} + release_tag: ${{ inputs.release_tag }} + target: ${{ inputs.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + upload-chain-spec-builder-macos-artifacts-to-s3: + if: ${{ inputs.package == 'staging-chain-spec-builder' && inputs.target == 
'aarch64-apple-darwin' }} + needs: [build-macos-rc] + uses: ./.github/workflows/release-reusable-s3-upload.yml + with: + package: chain-spec-builder + release_tag: ${{ inputs.release_tag }} + target: ${{ inputs.target }} secrets: AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} diff --git a/.github/workflows/release-reusable-s3-upload.yml b/.github/workflows/release-reusable-s3-upload.yml index 6776b78da8e6..48c7e53c6c8f 100644 --- a/.github/workflows/release-reusable-s3-upload.yml +++ b/.github/workflows/release-reusable-s3-upload.yml @@ -9,7 +9,12 @@ on: type: string release_tag: - description: Tag matching the actual release candidate with the format stableYYMM-rcX or stableYYMM-rcX + description: Tag matching the actual release candidate with the format polkadot-stableYYMM(-X)-rcX required: true type: string + + target: + description: Target triple for which the artifacts are being uploaded (e.g. aarch64-apple-darwin) + required: true + type: string + secrets: @@ -34,11 +39,11 @@ jobs: - name: Checkout uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - - name: Download artifacts + - name: Download ${{ inputs.target }} artifacts uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 with: - name: ${{ inputs.package }} - path: artifacts/${{ inputs.package }} + name: ${{ inputs.package }}_${{ inputs.target }} + path: release-artifacts/${{ inputs.target }}/${{ inputs.package }} - name: Configure AWS Credentials uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 @@ -50,4 +55,4 @@ jobs: - name: Upload ${{ inputs.package }} artifacts to s3 run: | . ./.github/scripts/release/release_lib.sh - upload_s3_release ${{ inputs.package }} ${{ inputs.release_tag }} + upload_s3_release ${{ inputs.package }} ${{ inputs.release_tag }} ${{ inputs.target }} diff --git a/.github/workflows/release-srtool.yml b/.github/workflows/release-srtool.yml index 9a29b46d2fc3..fc10496d481b 100644 --- a/.github/workflows/release-srtool.yml +++ b/.github/workflows/release-srtool.yml @@ -1,7 +1,7 @@ name: Srtool build env: - SUBWASM_VERSION: 0.20.0 + SUBWASM_VERSION: 0.21.0 TOML_CLI_VERSION: 0.2.4 on: @@ -11,14 +11,16 @@ on: type: string build_opts: type: string + profile: + type: string outputs: published_runtimes: value: ${{ jobs.find-runtimes.outputs.runtime }} - schedule: - - cron: "00 02 * * 1" # 2AM weekly on monday - - workflow_dispatch: +permissions: + id-token: write + attestations: write + contents: read jobs: find-runtimes: @@ -75,6 +77,7 @@ jobs: with: chain: ${{ matrix.chain }} runtime_dir: ${{ matrix.runtime_dir }} + profile: ${{ inputs.profile }} - name: Summary run: | @@ -83,6 +86,11 @@ jobs: echo "Compact Runtime: ${{ steps.srtool_build.outputs.wasm }}" echo "Compressed Runtime: ${{ steps.srtool_build.outputs.wasm_compressed }}" + - name: Generate artifact attestation + uses: actions/attest-build-provenance@1c608d11d69870c2092266b3f9a6f3abbf17002c # v1.4.3 + with: + subject-path: ${{ steps.srtool_build.outputs.wasm }} + # We now get extra information thanks to subwasm - name: Install subwasm run: | diff --git a/.github/workflows/runtimes-matrix.json b/.github/workflows/runtimes-matrix.json index f991db55b86d..104e73521331 100644 --- a/.github/workflows/runtimes-matrix.json +++ b/.github/workflows/runtimes-matrix.json @@ -8,6 +8,8 @@ "bench_features": "runtime-benchmarks", "bench_flags": "--genesis-builder-policy=none 
--exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic,pallet_nomination_pools,pallet_remark,pallet_transaction_storage", "uri": null, + "old_package": "staging-node-cli", + "old_bin": "substrate-node", "is_relay": false }, { @@ -19,6 +21,8 @@ "bench_flags": "", "bench_features": "runtime-benchmarks", "uri": "wss://try-runtime-westend.polkadot.io:443", + "old_package": "polkadot", + "old_bin": "polkadot", "is_relay": true }, { @@ -27,9 +31,11 @@ "path": "polkadot/runtime/rococo", "header": "polkadot/file_header.txt", "template": "polkadot/xcm/pallet-xcm-benchmarks/template.hbs", - "uri": "wss://try-runtime-rococo.polkadot.io:443", "bench_features": "runtime-benchmarks", "bench_flags": "", + "uri": "wss://try-runtime-rococo.polkadot.io:443", + "old_package": "polkadot", + "old_bin": "polkadot", "is_relay": true }, { @@ -41,6 +47,8 @@ "bench_features": "runtime-benchmarks", "bench_flags": "", "uri": "wss://westend-asset-hub-rpc.polkadot.io:443", + "old_package": "polkadot-parachain-bin", + "old_bin": "polkadot-parachain", "is_relay": false }, { @@ -52,6 +60,8 @@ "bench_features": "runtime-benchmarks", "bench_flags": "", "uri": "wss://rococo-asset-hub-rpc.polkadot.io:443", + "old_package": "polkadot-parachain-bin", + "old_bin": "polkadot-parachain", "is_relay": false }, { @@ -63,6 +73,8 @@ "bench_features": "runtime-benchmarks", "bench_flags": "", "uri": "wss://rococo-bridge-hub-rpc.polkadot.io:443", + "old_package": "polkadot-parachain-bin", + "old_bin": "polkadot-parachain", "is_relay": false }, { @@ -74,6 +86,8 @@ "bench_features": "runtime-benchmarks", "bench_flags": "", "uri": "wss://westend-bridge-hub-rpc.polkadot.io:443", + "old_package": "polkadot-parachain-bin", + "old_bin": "polkadot-parachain", "is_relay": false }, { @@ -84,7 +98,10 @@ "template": "cumulus/templates/xcm-bench-template.hbs", "bench_features": "runtime-benchmarks", "bench_flags": "", - "uri": "wss://westend-collectives-rpc.polkadot.io:443" + "uri": "wss://westend-collectives-rpc.polkadot.io:443", + "old_package": "polkadot-parachain-bin", + "old_bin": "polkadot-parachain", + "is_relay": false }, { "name": "contracts-rococo", @@ -95,6 +112,8 @@ "bench_features": "runtime-benchmarks", "bench_flags": "--genesis-builder-policy=none --exclude-pallets=pallet_xcm", "uri": "wss://rococo-contracts-rpc.polkadot.io:443", + "old_package": "polkadot-parachain-bin", + "old_bin": "polkadot-parachain", "is_relay": false }, { @@ -106,6 +125,8 @@ "bench_features": "runtime-benchmarks", "bench_flags": "--genesis-builder-policy=none --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic", "uri": "wss://rococo-coretime-rpc.polkadot.io:443", + "old_package": "polkadot-parachain-bin", + "old_bin": "polkadot-parachain", "is_relay": false }, { @@ -117,6 +138,8 @@ "bench_features": "runtime-benchmarks", "bench_flags": "--genesis-builder-policy=none --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic", "uri": "wss://westend-coretime-rpc.polkadot.io:443", + "old_package": "polkadot-parachain-bin", + "old_bin": "polkadot-parachain", "is_relay": false }, { @@ -128,6 +151,8 @@ "bench_features": "runtime-benchmarks", "bench_flags": "--genesis-builder-policy=none", "uri": null, + "old_package": "polkadot-parachain-bin", + "old_bin": "polkadot-parachain", "is_relay": false }, { @@ -139,6 +164,8 @@ "bench_features": "runtime-benchmarks", "bench_flags": "--genesis-builder-policy=none 
--exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic", "uri": "wss://rococo-people-rpc.polkadot.io:443", + "old_package": "polkadot-parachain-bin", + "old_bin": "polkadot-parachain", "is_relay": false }, { @@ -150,6 +177,8 @@ "bench_features": "runtime-benchmarks", "bench_flags": "--genesis-builder-policy=none --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic", "uri": "wss://westend-people-rpc.polkadot.io:443", + "old_package": "polkadot-parachain-bin", + "old_bin": "polkadot-parachain", "is_relay": false } ] diff --git a/.github/workflows/tests-linux-stable-coverage.yml b/.github/workflows/tests-linux-stable-coverage.yml index c5af6bcae77f..61e01cda4428 100644 --- a/.github/workflows/tests-linux-stable-coverage.yml +++ b/.github/workflows/tests-linux-stable-coverage.yml @@ -102,7 +102,7 @@ jobs: merge-multiple: true - run: ls -al reports/ - name: Upload to Codecov - uses: codecov/codecov-action@v4 + uses: codecov/codecov-action@v5 with: token: ${{ secrets.CODECOV_TOKEN }} verbose: true diff --git a/.github/workflows/tests-linux-stable.yml b/.github/workflows/tests-linux-stable.yml index b9d0605b2495..3f8dc4fe1240 100644 --- a/.github/workflows/tests-linux-stable.yml +++ b/.github/workflows/tests-linux-stable.yml @@ -37,7 +37,7 @@ jobs: id: required run: WASM_BUILD_NO_COLOR=1 forklift cargo test -p staging-node-cli --release --locked -- --ignored - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' }} + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} uses: ./.github/actions/workflow-stopper with: app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} @@ -63,7 +63,7 @@ jobs: id: required run: forklift cargo nextest run --workspace --features runtime-benchmarks benchmark --locked --cargo-profile testnet --cargo-quiet - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' }} + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} uses: ./.github/actions/workflow-stopper with: app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} @@ -113,7 +113,7 @@ jobs: if: ${{ matrix.partition == '1/3' }} run: forklift cargo nextest run -p sp-api-test --features enable-staging-api --cargo-quiet - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' }} + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} uses: ./.github/actions/workflow-stopper with: app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} @@ -155,7 +155,7 @@ jobs: --filter-expr " !test(/all_security_features_work/) - test(/nonexistent_cache_dir/)" \ --partition count:${{ matrix.partition }} \ - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' }} + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} uses: ./.github/actions/workflow-stopper with: app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} diff --git a/.github/workflows/tests-misc.yml b/.github/workflows/tests-misc.yml index cca32650b106..decd88f2e84c 100644 --- a/.github/workflows/tests-misc.yml +++ b/.github/workflows/tests-misc.yml @@ -165,12 +165,14 @@ jobs: - name: Download artifact (master run) uses: actions/download-artifact@v4.1.8 + continue-on-error: true with: name: cargo-check-benches-master-${{ 
github.sha }} path: ./artifacts/master - name: Download artifact (current run) uses: actions/download-artifact@v4.1.8 + continue-on-error: true with: name: cargo-check-benches-current-${{ github.sha }} path: ./artifacts/current @@ -183,6 +185,12 @@ jobs: exit 0 fi + # fail if no artifacts + if [ ! -d ./artifacts/master ] || [ ! -d ./artifacts/current ]; then + echo "No artifacts found" + exit 1 + fi + docker run --rm \ -v $PWD/artifacts/master:/artifacts/master \ -v $PWD/artifacts/current:/artifacts/current \ diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index f508404f1efa..42a7e87bda43 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -22,7 +22,7 @@ workflow: variables: # CI_IMAGE: !reference [ .ci-unified, variables, CI_IMAGE ] - CI_IMAGE: "docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-09-11-v202409111034" + CI_IMAGE: "docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-11-19-v202411281558" # BUILDAH_IMAGE is defined in group variables BUILDAH_COMMAND: "buildah --storage-driver overlay2" RELENG_SCRIPTS_BRANCH: "master" diff --git a/.gitlab/pipeline/zombienet/parachain-template.yml b/.gitlab/pipeline/zombienet/parachain-template.yml index 896ba7913be7..d5c1b6558b39 100644 --- a/.gitlab/pipeline/zombienet/parachain-template.yml +++ b/.gitlab/pipeline/zombienet/parachain-template.yml @@ -43,4 +43,4 @@ zombienet-parachain-template-smoke: - ls -ltr $(pwd)/artifacts - cargo test -p template-zombienet-tests --features zombienet --tests minimal_template_block_production_test - cargo test -p template-zombienet-tests --features zombienet --tests parachain_template_block_production_test - # - cargo test -p template-zombienet-tests --features zombienet --tests solochain_template_block_production_test + - cargo test -p template-zombienet-tests --features zombienet --tests solochain_template_block_production_test diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml index 3dab49a118e5..e722239d890c 100644 --- a/.gitlab/pipeline/zombienet/polkadot.yml +++ b/.gitlab/pipeline/zombienet/polkadot.yml @@ -179,7 +179,7 @@ zombienet-polkadot-elastic-scaling-0001-basic-3cores-6s-blocks: --local-dir="${LOCAL_DIR}/elastic_scaling" --test="0001-basic-3cores-6s-blocks.zndsl" -zombienet-polkadot-elastic-scaling-0002-elastic-scaling-doesnt-break-parachains: +.zombienet-polkadot-elastic-scaling-0002-elastic-scaling-doesnt-break-parachains: extends: - .zombienet-polkadot-common before_script: @@ -233,7 +233,7 @@ zombienet-polkadot-functional-0015-coretime-shared-core: --local-dir="${LOCAL_DIR}/functional" --test="0016-approval-voting-parallel.zndsl" -zombienet-polkadot-functional-0017-sync-backing: +.zombienet-polkadot-functional-0017-sync-backing: extends: - .zombienet-polkadot-common script: @@ -252,6 +252,17 @@ zombienet-polkadot-functional-0018-shared-core-idle-parachain: --local-dir="${LOCAL_DIR}/functional" --test="0018-shared-core-idle-parachain.zndsl" +zombienet-polkadot-functional-0019-coretime-collation-fetching-fairness: + extends: + - .zombienet-polkadot-common + before_script: + - !reference [ .zombienet-polkadot-common, before_script ] + - cp --remove-destination ${LOCAL_DIR}/assign-core.js ${LOCAL_DIR}/functional + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}/functional" + --test="0019-coretime-collation-fetching-fairness.zndsl" + zombienet-polkadot-smoke-0001-parachains-smoke-test: extends: - .zombienet-polkadot-common diff --git a/Cargo.lock b/Cargo.lock index 02d7da8f7657..13ba03af5065 
100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -125,6 +125,48 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" +[[package]] +name = "alloy-core" +version = "0.8.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c618bd382f0bc2ac26a7e4bfae01c9b015ca8f21b37ca40059ae35a7e62b3dc6" +dependencies = [ + "alloy-dyn-abi", + "alloy-json-abi", + "alloy-primitives 0.8.15", + "alloy-rlp", + "alloy-sol-types 0.8.15", +] + +[[package]] +name = "alloy-dyn-abi" +version = "0.8.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41056bde53ae10ffbbf11618efbe1e0290859e5eab0fe9ef82ebdb62f12a866f" +dependencies = [ + "alloy-json-abi", + "alloy-primitives 0.8.15", + "alloy-sol-type-parser", + "alloy-sol-types 0.8.15", + "const-hex", + "itoa", + "serde", + "serde_json", + "winnow 0.6.18", +] + +[[package]] +name = "alloy-json-abi" +version = "0.8.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c357da577dfb56998d01f574d81ad7a1958d248740a7981b205d69d65a7da404" +dependencies = [ + "alloy-primitives 0.8.15", + "alloy-sol-type-parser", + "serde", + "serde_json", +] + [[package]] name = "alloy-primitives" version = "0.4.2" @@ -145,6 +187,34 @@ dependencies = [ "tiny-keccak", ] +[[package]] +name = "alloy-primitives" +version = "0.8.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6259a506ab13e1d658796c31e6e39d2e2ee89243bcc505ddc613b35732e0a430" +dependencies = [ + "alloy-rlp", + "bytes", + "cfg-if", + "const-hex", + "derive_more 1.0.0", + "foldhash", + "hashbrown 0.15.2", + "hex-literal", + "indexmap 2.7.0", + "itoa", + "k256", + "keccak-asm", + "paste", + "proptest", + "rand", + "ruint", + "rustc-hash 2.0.0", + "serde", + "sha3 0.10.8", + "tiny-keccak", +] + [[package]] name = "alloy-rlp" version = "0.3.3" @@ -169,18 +239,88 @@ dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.87", - "syn-solidity", + "syn-solidity 0.4.2", + "tiny-keccak", +] + +[[package]] +name = "alloy-sol-macro" +version = "0.8.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9d64f851d95619233f74b310f12bcf16e0cbc27ee3762b6115c14a84809280a" +dependencies = [ + "alloy-sol-macro-expander", + "alloy-sol-macro-input", + "proc-macro-error2", + "proc-macro2 1.0.86", + "quote 1.0.37", + "syn 2.0.87", +] + +[[package]] +name = "alloy-sol-macro-expander" +version = "0.8.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6bf7ed1574b699f48bf17caab4e6e54c6d12bc3c006ab33d58b1e227c1c3559f" +dependencies = [ + "alloy-sol-macro-input", + "const-hex", + "heck 0.5.0", + "indexmap 2.7.0", + "proc-macro-error2", + "proc-macro2 1.0.86", + "quote 1.0.37", + "syn 2.0.87", + "syn-solidity 0.8.15", "tiny-keccak", ] +[[package]] +name = "alloy-sol-macro-input" +version = "0.8.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c02997ccef5f34f9c099277d4145f183b422938ed5322dc57a089fe9b9ad9ee" +dependencies = [ + "const-hex", + "dunce", + "heck 0.5.0", + "proc-macro2 1.0.86", + "quote 1.0.37", + "syn 2.0.87", + "syn-solidity 0.8.15", +] + +[[package]] +name = "alloy-sol-type-parser" +version = "0.8.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce13ff37285b0870d0a0746992a4ae48efaf34b766ae4c2640fa15e5305f8e73" +dependencies = [ + "serde", + "winnow 
0.6.18", +] + [[package]] name = "alloy-sol-types" version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "98d7107bed88e8f09f0ddcc3335622d87bfb6821f3e0c7473329fb1cfad5e015" dependencies = [ - "alloy-primitives", - "alloy-sol-macro", + "alloy-primitives 0.4.2", + "alloy-sol-macro 0.4.2", + "const-hex", + "serde", +] + +[[package]] +name = "alloy-sol-types" +version = "0.8.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1174cafd6c6d810711b4e00383037bdb458efc4fe3dbafafa16567e0320c54d8" +dependencies = [ + "alloy-json-abi", + "alloy-primitives 0.8.15", + "alloy-sol-macro 0.8.15", "const-hex", "serde", ] @@ -701,30 +841,14 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" -[[package]] -name = "asn1-rs" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f6fd5ddaf0351dff5b8da21b2fb4ff8e08ddd02857f0bf69c47639106c0fff0" -dependencies = [ - "asn1-rs-derive 0.4.0", - "asn1-rs-impl 0.1.0", - "displaydoc", - "nom", - "num-traits", - "rusticata-macros", - "thiserror", - "time", -] - [[package]] name = "asn1-rs" version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22ad1373757efa0f70ec53939aabc7152e1591cb485208052993070ac8d2429d" dependencies = [ - "asn1-rs-derive 0.5.0", - "asn1-rs-impl 0.2.0", + "asn1-rs-derive", + "asn1-rs-impl", "displaydoc", "nom", "num-traits", @@ -733,18 +857,6 @@ dependencies = [ "time", ] -[[package]] -name = "asn1-rs-derive" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c" -dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 1.0.109", - "synstructure 0.12.6", -] - [[package]] name = "asn1-rs-derive" version = "0.5.0" @@ -757,17 +869,6 @@ dependencies = [ "synstructure 0.13.1", ] -[[package]] -name = "asn1-rs-impl" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed" -dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 1.0.109", -] - [[package]] name = "asn1-rs-impl" version = "0.2.0" @@ -899,6 +1000,7 @@ dependencies = [ "pallet-xcm-benchmarks 7.0.0", "pallet-xcm-bridge-hub-router 0.5.0", "parachains-common 7.0.0", + "parachains-runtimes-test-utils 7.0.0", "parity-scale-codec", "polkadot-parachain-primitives 6.0.0", "polkadot-runtime-common 7.0.0", @@ -1036,6 +1138,7 @@ dependencies = [ "pallet-xcm-benchmarks 7.0.0", "pallet-xcm-bridge-hub-router 0.5.0", "parachains-common 7.0.0", + "parachains-runtimes-test-utils 7.0.0", "parity-scale-codec", "polkadot-parachain-primitives 6.0.0", "polkadot-runtime-common 7.0.0", @@ -1077,6 +1180,7 @@ dependencies = [ "frame-support 28.0.0", "frame-system 28.0.0", "hex-literal", + "pallet-asset-conversion 10.0.0", "pallet-assets 29.1.0", "pallet-balances 28.0.0", "pallet-collator-selection 9.0.0", @@ -1094,6 +1198,7 @@ dependencies = [ "staging-xcm-builder 7.0.0", "staging-xcm-executor 7.0.0", "substrate-wasm-builder 17.0.0", + "xcm-runtime-apis 0.1.0", ] [[package]] @@ -1291,7 +1396,7 @@ dependencies = [ "futures-lite 2.3.0", "parking", "polling 3.4.0", - "rustix 0.38.25", + "rustix 0.38.21", "slab", "tracing", "windows-sys 0.52.0", @@ -1373,7 +1478,7 @@ dependencies = [ "cfg-if", 
"event-listener 5.3.1", "futures-lite 2.3.0", - "rustix 0.38.25", + "rustix 0.38.21", "tracing", ] @@ -1389,7 +1494,7 @@ dependencies = [ "cfg-if", "futures-core", "futures-io", - "rustix 0.38.25", + "rustix 0.38.21", "signal-hook-registry", "slab", "windows-sys 0.52.0", @@ -1474,6 +1579,19 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "asynchronous-codec" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a860072022177f903e59730004fb5dc13db9275b79bb2aef7ba8ce831956c233" +dependencies = [ + "bytes", + "futures-sink", + "futures-util", + "memchr", + "pin-project-lite", +] + [[package]] name = "atomic-take" version = "1.1.0" @@ -1934,6 +2052,8 @@ dependencies = [ "frame-support 28.0.0", "parity-scale-codec", "scale-info", + "sp-core 28.0.0", + "staging-xcm 7.0.0", ] [[package]] @@ -1944,6 +2064,8 @@ dependencies = [ "frame-support 28.0.0", "parity-scale-codec", "scale-info", + "sp-core 28.0.0", + "staging-xcm 7.0.0", ] [[package]] @@ -2537,6 +2659,7 @@ dependencies = [ "bp-rococo", "bp-runtime 0.7.0", "bp-westend", + "bp-xcm-bridge-hub-router 0.6.0", "bridge-hub-common 0.1.0", "bridge-hub-test-utils 0.7.0", "bridge-runtime-common 0.7.0", @@ -2578,6 +2701,7 @@ dependencies = [ "pallet-xcm-benchmarks 7.0.0", "pallet-xcm-bridge-hub 0.2.0", "parachains-common 7.0.0", + "parachains-runtimes-test-utils 7.0.0", "parity-scale-codec", "polkadot-parachain-primitives 6.0.0", "polkadot-runtime-common 7.0.0", @@ -2774,6 +2898,7 @@ dependencies = [ "bp-rococo", "bp-runtime 0.7.0", "bp-westend", + "bp-xcm-bridge-hub-router 0.6.0", "bridge-hub-common 0.1.0", "bridge-hub-test-utils 0.7.0", "bridge-runtime-common 0.7.0", @@ -2815,6 +2940,7 @@ dependencies = [ "pallet-xcm-benchmarks 7.0.0", "pallet-xcm-bridge-hub 0.2.0", "parachains-common 7.0.0", + "parachains-runtimes-test-utils 7.0.0", "parity-scale-codec", "polkadot-parachain-primitives 6.0.0", "polkadot-runtime-common 7.0.0", @@ -2989,6 +3115,9 @@ name = "bytes" version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" +dependencies = [ + "serde", +] [[package]] name = "bzip2-sys" @@ -3159,6 +3288,7 @@ dependencies = [ name = "chain-spec-guide-runtime" version = "0.0.0" dependencies = [ + "cmd_lib", "docify", "frame-support 28.0.0", "pallet-balances 28.0.0", @@ -3550,6 +3680,7 @@ dependencies = [ "pallet-utility 28.0.0", "pallet-xcm 7.0.0", "parachains-common 7.0.0", + "parachains-runtimes-test-utils 7.0.0", "parity-scale-codec", "polkadot-parachain-primitives 6.0.0", "polkadot-runtime-common 7.0.0", @@ -3745,9 +3876,9 @@ dependencies = [ [[package]] name = "const-hex" -version = "1.10.0" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5104de16b218eddf8e34ffe2f86f74bfa4e61e95a1b89732fccf6325efd0557" +checksum = "4b0485bab839b018a8f1723fc5391819fea5f8f0f32288ef8a735fd096b6160c" dependencies = [ "cfg-if", "cpufeatures", @@ -3991,6 +4122,7 @@ dependencies = [ "pallet-xcm 7.0.0", "pallet-xcm-benchmarks 7.0.0", "parachains-common 7.0.0", + "parachains-runtimes-test-utils 7.0.0", "parity-scale-codec", "polkadot-parachain-primitives 6.0.0", "polkadot-runtime-common 7.0.0", @@ -4090,6 +4222,7 @@ dependencies = [ "pallet-xcm 7.0.0", "pallet-xcm-benchmarks 7.0.0", "parachains-common 7.0.0", + "parachains-runtimes-test-utils 7.0.0", "parity-scale-codec", "polkadot-parachain-primitives 6.0.0", "polkadot-runtime-common 7.0.0", @@ -4497,6 
+4630,7 @@ dependencies = [ "sp-runtime 31.0.1", "sp-state-machine 0.35.0", "sp-timestamp 26.0.0", + "sp-trie 29.0.0", "substrate-prometheus-endpoint", "tokio", "tracing", @@ -4680,6 +4814,7 @@ dependencies = [ "cumulus-relay-chain-interface", "cumulus-relay-chain-minimal-node", "futures", + "futures-timer", "polkadot-primitives 7.0.0", "sc-client-api", "sc-consensus", @@ -4795,7 +4930,6 @@ dependencies = [ "pallet-message-queue 31.0.0", "parity-scale-codec", "polkadot-parachain-primitives 6.0.0", - "polkadot-runtime-common 7.0.0", "polkadot-runtime-parachains 7.0.0", "rand", "sc-client-api", @@ -5746,9 +5880,9 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.4.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308" +checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" [[package]] name = "data-encoding-macro" @@ -5789,27 +5923,13 @@ dependencies = [ "zeroize", ] -[[package]] -name = "der-parser" -version = "8.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbd676fbbab537128ef0278adb5576cf363cff6aa22a7b24effe97347cfab61e" -dependencies = [ - "asn1-rs 0.5.2", - "displaydoc", - "nom", - "num-bigint", - "num-traits", - "rusticata-macros", -] - [[package]] name = "der-parser" version = "9.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5cd0a5c643689626bec213c4d8bd4d96acc8ffdb4ad4bb6bc16abf27d5f4b553" dependencies = [ - "asn1-rs 0.6.1", + "asn1-rs", "displaydoc", "nom", "num-bigint", @@ -5965,6 +6085,15 @@ dependencies = [ "dirs-sys-next", ] +[[package]] +name = "dirs" +version = "5.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" +dependencies = [ + "dirs-sys", +] + [[package]] name = "dirs-sys" version = "0.4.1" @@ -6268,18 +6397,6 @@ dependencies = [ "cfg-if", ] -[[package]] -name = "enum-as-inner" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" -dependencies = [ - "heck 0.4.1", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 1.0.109", -] - [[package]] name = "enum-as-inner" version = "0.6.0" @@ -6662,7 +6779,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb42427514b063d97ce21d5199f36c0c307d981434a6be32582bc79fe5bd2303" dependencies = [ "expander", - "indexmap 2.2.3", + "indexmap 2.7.0", "proc-macro-crate 3.1.0", "proc-macro2 1.0.86", "quote 1.0.37", @@ -6853,6 +6970,12 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foldhash" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f81ec6369c545a7d40e4589b5597581fa1c441fe1cce96dd1de43159910a36a2" + [[package]] name = "foreign-types" version = "0.3.2" @@ -6997,6 +7120,7 @@ dependencies = [ "sc-client-db", "sc-executor 0.32.0", "sc-executor-common 0.29.0", + "sc-runtime-utilities", "sc-service", "sc-sysinfo", "serde", @@ -7214,6 +7338,18 @@ dependencies = [ "serde", ] +[[package]] +name = "frame-metadata" +version = "18.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"daaf440c68eb2c3d88e5760fe8c7af3f9fee9181fab6c2f2c4e7cc48dcc40bb8" +dependencies = [ + "cfg-if", + "parity-scale-codec", + "scale-info", + "serde", +] + [[package]] name = "frame-metadata-hash-extension" version = "0.1.0" @@ -7221,7 +7357,7 @@ dependencies = [ "array-bytes", "const-hex", "docify", - "frame-metadata 16.0.0", + "frame-metadata 18.0.0", "frame-support 28.0.0", "frame-system 28.0.0", "log", @@ -7306,7 +7442,7 @@ dependencies = [ "bitflags 1.3.2", "docify", "environmental", - "frame-metadata 16.0.0", + "frame-metadata 18.0.0", "frame-support-procedural 23.0.0", "frame-system 28.0.0", "impl-trait-for-tuples", @@ -7400,7 +7536,7 @@ dependencies = [ "macro_magic", "parity-scale-codec", "pretty_assertions", - "proc-macro-warning 1.0.0", + "proc-macro-warning", "proc-macro2 1.0.86", "quote 1.0.37", "regex", @@ -7427,7 +7563,7 @@ dependencies = [ "frame-support-procedural-tools 13.0.0", "itertools 0.11.0", "macro_magic", - "proc-macro-warning 1.0.0", + "proc-macro-warning", "proc-macro2 1.0.86", "quote 1.0.37", "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -7484,7 +7620,7 @@ version = "3.0.0" dependencies = [ "frame-benchmarking 28.0.0", "frame-executive 28.0.0", - "frame-metadata 16.0.0", + "frame-metadata 18.0.0", "frame-support 28.0.0", "frame-support-test-pallet", "frame-system 28.0.0", @@ -7677,7 +7813,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29f9df8a11882c4e3335eb2d18a0137c505d9ca927470b0cac9c6f0ae07d28f7" dependencies = [ - "rustix 0.38.25", + "rustix 0.38.21", "windows-sys 0.48.0", ] @@ -7710,9 +7846,9 @@ dependencies = [ [[package]] name = "futures-bounded" -version = "0.1.0" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b07bbbe7d7e78809544c6f718d875627addc73a7c3582447abc052cd3dc67e0" +checksum = "91f328e7fb845fc832912fb6a34f40cf6d1888c92f974d1893a54e97b5ff542e" dependencies = [ "futures-timer", "futures-util", @@ -7793,12 +7929,13 @@ dependencies = [ [[package]] name = "futures-rustls" -version = "0.24.0" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35bd3cf68c183738046838e300353e4716c674dc5e56890de4826801a6622a28" +checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" dependencies = [ "futures-io", - "rustls 0.21.7", + "rustls 0.23.18", + "rustls-pki-types", ] [[package]] @@ -7820,7 +7957,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" dependencies = [ "gloo-timers", - "send_wrapper 0.4.0", + "send_wrapper", ] [[package]] @@ -8099,7 +8236,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.9", - "indexmap 2.2.3", + "indexmap 2.7.0", "slab", "tokio", "tokio-util", @@ -8118,7 +8255,7 @@ dependencies = [ "futures-core", "futures-sink", "http 1.1.0", - "indexmap 2.2.3", + "indexmap 2.7.0", "slab", "tokio", "tokio-util", @@ -8189,6 +8326,16 @@ dependencies = [ "serde", ] +[[package]] +name = "hashbrown" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" +dependencies = [ + "foldhash", + "serde", +] + [[package]] name = "hashlink" version = "0.8.4" @@ -8239,6 +8386,9 @@ name = "hex" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +dependencies = [ + "serde", +] [[package]] name = "hex-conservative" @@ -8270,7 +8420,7 @@ dependencies = [ "async-trait", "cfg-if", "data-encoding", - "enum-as-inner 0.6.0", + "enum-as-inner", "futures-channel", "futures-io", "futures-util", @@ -8278,6 +8428,7 @@ dependencies = [ "ipnet", "once_cell", "rand", + "socket2 0.5.7", "thiserror", "tinyvec", "tokio", @@ -8287,9 +8438,9 @@ dependencies = [ [[package]] name = "hickory-resolver" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28757f23aa75c98f254cf0405e6d8c25b831b32921b050a66692427679b1f243" +checksum = "0a2e2aba9c389ce5267d31cf1e4dace82390ae276b0b364ea55630b1fa1b44b4" dependencies = [ "cfg-if", "futures-util", @@ -8529,7 +8680,7 @@ dependencies = [ "hyper 1.3.1", "hyper-util", "log", - "rustls 0.23.14", + "rustls 0.23.18", "rustls-native-certs 0.8.0", "rustls-pki-types", "tokio", @@ -8612,17 +8763,6 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" -[[package]] -name = "idna" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" -dependencies = [ - "matches", - "unicode-bidi", - "unicode-normalization", -] - [[package]] name = "idna" version = "0.4.0" @@ -8816,12 +8956,13 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.3" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "233cf39063f058ea2caae4091bf4a3ef70a653afbc026f5c4a4135d114e3c177" +checksum = "62f822373a4fe84d4bb149bf54e584a7f4abec90e072ed49cda0edea5b95471f" dependencies = [ "equivalent", - "hashbrown 0.14.5", + "hashbrown 0.15.2", + "serde", ] [[package]] @@ -8935,7 +9076,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi 0.3.9", - "rustix 0.38.25", + "rustix 0.38.21", "windows-sys 0.48.0", ] @@ -9144,7 +9285,7 @@ dependencies = [ "http 1.1.0", "jsonrpsee-core", "pin-project", - "rustls 0.23.14", + "rustls 0.23.18", "rustls-pki-types", "rustls-platform-verifier", "soketto 0.8.0", @@ -9197,7 +9338,7 @@ dependencies = [ "hyper-util", "jsonrpsee-core", "jsonrpsee-types", - "rustls 0.23.14", + "rustls 0.23.18", "rustls-platform-verifier", "serde", "serde_json", @@ -9321,6 +9462,16 @@ dependencies = [ "cpufeatures", ] +[[package]] +name = "keccak-asm" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "505d1856a39b200489082f90d897c3f07c455563880bc5952e38eabf731c83b6" +dependencies = [ + "digest 0.10.7", + "sha3-asm", +] + [[package]] name = "keccak-hash" version = "0.11.0" @@ -9614,9 +9765,31 @@ dependencies = [ "futures-timer", "getrandom", "instant", - "libp2p-allow-block-list", - "libp2p-connection-limits", - "libp2p-core", + "libp2p-allow-block-list 0.2.0", + "libp2p-connection-limits 0.2.1", + "libp2p-core 0.40.1", + "libp2p-identity", + "libp2p-swarm 0.43.7", + "multiaddr 0.18.1", + "pin-project", + "rw-stream-sink", + "thiserror", +] + +[[package]] +name = "libp2p" +version = "0.54.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbbe80f9c7e00526cd6b838075b9c171919404a4732cb2fa8ece0a093223bfc4" +dependencies = [ + "bytes", + 
"either", + "futures", + "futures-timer", + "getrandom", + "libp2p-allow-block-list 0.4.0", + "libp2p-connection-limits 0.4.0", + "libp2p-core 0.42.0", "libp2p-dns", "libp2p-identify", "libp2p-identity", @@ -9627,10 +9800,9 @@ dependencies = [ "libp2p-ping", "libp2p-quic", "libp2p-request-response", - "libp2p-swarm", + "libp2p-swarm 0.45.1", "libp2p-tcp", "libp2p-upnp", - "libp2p-wasm-ext", "libp2p-websocket", "libp2p-yamux", "multiaddr 0.18.1", @@ -9645,9 +9817,21 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55b46558c5c0bf99d3e2a1a38fd54ff5476ca66dd1737b12466a1824dd219311" dependencies = [ - "libp2p-core", + "libp2p-core 0.40.1", + "libp2p-identity", + "libp2p-swarm 0.43.7", + "void", +] + +[[package]] +name = "libp2p-allow-block-list" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1027ccf8d70320ed77e984f273bc8ce952f623762cb9bf2d126df73caef8041" +dependencies = [ + "libp2p-core 0.42.0", "libp2p-identity", - "libp2p-swarm", + "libp2p-swarm 0.45.1", "void", ] @@ -9657,9 +9841,21 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2f5107ad45cb20b2f6c3628c7b6014b996fcb13a88053f4569c872c6e30abf58" dependencies = [ - "libp2p-core", + "libp2p-core 0.40.1", + "libp2p-identity", + "libp2p-swarm 0.43.7", + "void", +] + +[[package]] +name = "libp2p-connection-limits" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d003540ee8baef0d254f7b6bfd79bac3ddf774662ca0abf69186d517ef82ad8" +dependencies = [ + "libp2p-core 0.42.0", "libp2p-identity", - "libp2p-swarm", + "libp2p-swarm 0.45.1", "void", ] @@ -9691,42 +9887,70 @@ dependencies = [ "void", ] +[[package]] +name = "libp2p-core" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a61f26c83ed111104cd820fe9bc3aaabbac5f1652a1d213ed6e900b7918a1298" +dependencies = [ + "either", + "fnv", + "futures", + "futures-timer", + "libp2p-identity", + "multiaddr 0.18.1", + "multihash 0.19.1", + "multistream-select", + "once_cell", + "parking_lot 0.12.3", + "pin-project", + "quick-protobuf 0.8.1", + "rand", + "rw-stream-sink", + "smallvec", + "thiserror", + "tracing", + "unsigned-varint 0.8.0", + "void", + "web-time", +] + [[package]] name = "libp2p-dns" -version = "0.40.1" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6a18db73084b4da2871438f6239fef35190b05023de7656e877c18a00541a3b" +checksum = "97f37f30d5c7275db282ecd86e54f29dd2176bd3ac656f06abf43bedb21eb8bd" dependencies = [ "async-trait", "futures", - "libp2p-core", + "hickory-resolver", + "libp2p-core 0.42.0", "libp2p-identity", - "log", "parking_lot 0.12.3", "smallvec", - "trust-dns-resolver", + "tracing", ] [[package]] name = "libp2p-identify" -version = "0.43.1" +version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45a96638a0a176bec0a4bcaebc1afa8cf909b114477209d7456ade52c61cd9cd" +checksum = "1711b004a273be4f30202778856368683bd9a83c4c7dcc8f848847606831a4e3" dependencies = [ - "asynchronous-codec", + "asynchronous-codec 0.7.0", "either", "futures", "futures-bounded", "futures-timer", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", - "libp2p-swarm", - "log", + "libp2p-swarm 0.45.1", "lru 0.12.3", "quick-protobuf 0.8.1", "quick-protobuf-codec", "smallvec", "thiserror", + "tracing", "void", ] @@ -9750,83 +9974,84 @@ dependencies = [ 
[[package]] name = "libp2p-kad" -version = "0.44.6" +version = "0.46.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16ea178dabba6dde6ffc260a8e0452ccdc8f79becf544946692fff9d412fc29d" +checksum = "ced237d0bd84bbebb7c2cad4c073160dacb4fe40534963c32ed6d4c6bb7702a3" dependencies = [ "arrayvec 0.7.4", - "asynchronous-codec", + "asynchronous-codec 0.7.0", "bytes", "either", "fnv", "futures", + "futures-bounded", "futures-timer", - "instant", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", - "libp2p-swarm", - "log", + "libp2p-swarm 0.45.1", "quick-protobuf 0.8.1", "quick-protobuf-codec", "rand", "sha2 0.10.8", "smallvec", "thiserror", + "tracing", "uint 0.9.5", - "unsigned-varint 0.7.2", "void", + "web-time", ] [[package]] name = "libp2p-mdns" -version = "0.44.0" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42a2567c305232f5ef54185e9604579a894fd0674819402bb0ac0246da82f52a" +checksum = "14b8546b6644032565eb29046b42744aee1e9f261ed99671b2c93fb140dba417" dependencies = [ "data-encoding", "futures", + "hickory-proto", "if-watch", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", - "libp2p-swarm", - "log", + "libp2p-swarm 0.45.1", "rand", "smallvec", "socket2 0.5.7", "tokio", - "trust-dns-proto 0.22.0", + "tracing", "void", ] [[package]] name = "libp2p-metrics" -version = "0.13.1" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "239ba7d28f8d0b5d77760dc6619c05c7e88e74ec8fbbe97f856f20a56745e620" +checksum = "77ebafa94a717c8442d8db8d3ae5d1c6a15e30f2d347e0cd31d057ca72e42566" dependencies = [ - "instant", - "libp2p-core", + "futures", + "libp2p-core 0.42.0", "libp2p-identify", "libp2p-identity", "libp2p-kad", "libp2p-ping", - "libp2p-swarm", - "once_cell", + "libp2p-swarm 0.45.1", + "pin-project", "prometheus-client", + "web-time", ] [[package]] name = "libp2p-noise" -version = "0.43.2" +version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2eeec39ad3ad0677551907dd304b2f13f17208ccebe333bef194076cd2e8921" +checksum = "36b137cb1ae86ee39f8e5d6245a296518912014eaa87427d24e6ff58cfc1b28c" dependencies = [ + "asynchronous-codec 0.7.0", "bytes", "curve25519-dalek 4.1.3", "futures", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", - "log", "multiaddr 0.18.1", "multihash 0.19.1", "once_cell", @@ -9836,68 +10061,71 @@ dependencies = [ "snow", "static_assertions", "thiserror", + "tracing", "x25519-dalek", "zeroize", ] [[package]] name = "libp2p-ping" -version = "0.43.1" +version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e702d75cd0827dfa15f8fd92d15b9932abe38d10d21f47c50438c71dd1b5dae3" +checksum = "005a34420359223b974ee344457095f027e51346e992d1e0dcd35173f4cdd422" dependencies = [ "either", "futures", "futures-timer", - "instant", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", - "libp2p-swarm", - "log", + "libp2p-swarm 0.45.1", "rand", + "tracing", "void", + "web-time", ] [[package]] name = "libp2p-quic" -version = "0.9.3" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "130d451d83f21b81eb7b35b360bc7972aeafb15177784adc56528db082e6b927" +checksum = "46352ac5cd040c70e88e7ff8257a2ae2f891a4076abad2c439584a31c15fd24e" dependencies = [ "bytes", "futures", "futures-timer", "if-watch", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", "libp2p-tls", - "log", "parking_lot 0.12.3", - 
"quinn 0.10.2", + "quinn", "rand", - "ring 0.16.20", - "rustls 0.21.7", + "ring 0.17.8", + "rustls 0.23.18", "socket2 0.5.7", "thiserror", "tokio", + "tracing", ] [[package]] name = "libp2p-request-response" -version = "0.25.3" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8e3b4d67870478db72bac87bfc260ee6641d0734e0e3e275798f089c3fecfd4" +checksum = "1356c9e376a94a75ae830c42cdaea3d4fe1290ba409a22c809033d1b7dcab0a6" dependencies = [ "async-trait", "futures", - "instant", - "libp2p-core", + "futures-bounded", + "futures-timer", + "libp2p-core 0.42.0", "libp2p-identity", - "libp2p-swarm", - "log", + "libp2p-swarm 0.45.1", "rand", "smallvec", + "tracing", "void", + "web-time", ] [[package]] @@ -9911,26 +10139,47 @@ dependencies = [ "futures", "futures-timer", "instant", - "libp2p-core", + "libp2p-core 0.40.1", "libp2p-identity", - "libp2p-swarm-derive", "log", "multistream-select", "once_cell", "rand", "smallvec", + "void", +] + +[[package]] +name = "libp2p-swarm" +version = "0.45.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7dd6741793d2c1fb2088f67f82cf07261f25272ebe3c0b0c311e0c6b50e851a" +dependencies = [ + "either", + "fnv", + "futures", + "futures-timer", + "libp2p-core 0.42.0", + "libp2p-identity", + "libp2p-swarm-derive", + "lru 0.12.3", + "multistream-select", + "once_cell", + "rand", + "smallvec", "tokio", + "tracing", "void", + "web-time", ] [[package]] name = "libp2p-swarm-derive" -version = "0.33.0" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4d5ec2a3df00c7836d7696c136274c9c59705bac69133253696a6c932cd1d74" +checksum = "206e0aa0ebe004d778d79fb0966aa0de996c19894e2c0605ba2f8524dd4443d8" dependencies = [ - "heck 0.4.1", - "proc-macro-warning 0.4.2", + "heck 0.5.0", "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.87", @@ -9938,102 +10187,90 @@ dependencies = [ [[package]] name = "libp2p-tcp" -version = "0.40.1" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b558dd40d1bcd1aaaed9de898e9ec6a436019ecc2420dd0016e712fbb61c5508" +checksum = "ad964f312c59dcfcac840acd8c555de8403e295d39edf96f5240048b5fcaa314" dependencies = [ "futures", "futures-timer", "if-watch", "libc", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", - "log", "socket2 0.5.7", "tokio", + "tracing", ] [[package]] name = "libp2p-tls" -version = "0.2.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8218d1d5482b122ccae396bbf38abdcb283ecc96fa54760e1dfd251f0546ac61" +checksum = "47b23dddc2b9c355f73c1e36eb0c3ae86f7dc964a3715f0731cfad352db4d847" dependencies = [ "futures", "futures-rustls", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", - "rcgen", - "ring 0.16.20", - "rustls 0.21.7", + "rcgen 0.11.3", + "ring 0.17.8", + "rustls 0.23.18", "rustls-webpki 0.101.4", "thiserror", - "x509-parser 0.15.1", + "x509-parser", "yasna", ] [[package]] name = "libp2p-upnp" -version = "0.1.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82775a47b34f10f787ad3e2a22e2c1541e6ebef4fe9f28f3ac553921554c94c1" +checksum = "01bf2d1b772bd3abca049214a3304615e6a36fa6ffc742bdd1ba774486200b8f" dependencies = [ "futures", "futures-timer", "igd-next", - "libp2p-core", - "libp2p-swarm", - "log", + "libp2p-core 0.42.0", + "libp2p-swarm 0.45.1", "tokio", + "tracing", "void", ] -[[package]] -name = "libp2p-wasm-ext" -version = 
"0.40.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e5d8e3a9e07da0ef5b55a9f26c009c8fb3c725d492d8bb4b431715786eea79c" -dependencies = [ - "futures", - "js-sys", - "libp2p-core", - "send_wrapper 0.6.0", - "wasm-bindgen", - "wasm-bindgen-futures", -] - [[package]] name = "libp2p-websocket" -version = "0.42.2" +version = "0.44.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "004ee9c4a4631435169aee6aad2f62e3984dc031c43b6d29731e8e82a016c538" +checksum = "888b2ff2e5d8dcef97283daab35ad1043d18952b65e05279eecbe02af4c6e347" dependencies = [ "either", "futures", "futures-rustls", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", - "log", "parking_lot 0.12.3", "pin-project-lite", "rw-stream-sink", "soketto 0.8.0", "thiserror", + "tracing", "url", "webpki-roots 0.25.2", ] [[package]] name = "libp2p-yamux" -version = "0.44.1" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8eedcb62824c4300efb9cfd4e2a6edaf3ca097b9e68b36dabe45a44469fd6a85" +checksum = "788b61c80789dba9760d8c669a5bedb642c8267555c803fabd8396e4ca5c5882" dependencies = [ + "either", "futures", - "libp2p-core", - "log", + "libp2p-core 0.42.0", "thiserror", - "yamux", + "tracing", + "yamux 0.12.1", + "yamux 0.13.3", ] [[package]] @@ -10158,9 +10395,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.11" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "969488b55f8ac402214f3f5fd243ebb7206cf82de60d3172994707a4bcc2b829" +checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f" [[package]] name = "lioness" @@ -10194,9 +10431,9 @@ dependencies = [ [[package]] name = "litep2p" -version = "0.8.1" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b67484b8ac41e1cfdf012f65fa81e88c2ef5f8a7d6dec0e2678c2d06dc04530" +checksum = "2b0fef34af8847e816003bf7fdeac5ea50b9a7a88441ac927a6166b5e812ab79" dependencies = [ "async-trait", "bs58", @@ -10207,7 +10444,7 @@ dependencies = [ "futures-timer", "hex-literal", "hickory-resolver", - "indexmap 2.2.3", + "indexmap 2.7.0", "libc", "mockall 0.13.0", "multiaddr 0.17.1", @@ -10219,7 +10456,7 @@ dependencies = [ "prost 0.12.6", "prost-build", "rand", - "rcgen", + "rcgen 0.10.0", "ring 0.16.20", "rustls 0.20.9", "serde", @@ -10239,7 +10476,7 @@ dependencies = [ "unsigned-varint 0.8.0", "url", "x25519-dalek", - "x509-parser 0.16.0", + "x509-parser", "yasna", "zeroize", ] @@ -10432,12 +10669,6 @@ dependencies = [ "regex-automata 0.1.10", ] -[[package]] -name = "matches" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" - [[package]] name = "matrixmultiply" version = "0.3.7" @@ -10510,13 +10741,13 @@ dependencies = [ [[package]] name = "merkleized-metadata" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f313fcff1d2a4bcaa2deeaa00bf7530d77d5f7bd0467a117dde2e29a75a7a17a" +checksum = "38c592efaf1b3250df14c8f3c2d952233f0302bb81d3586db2f303666c1cd607" dependencies = [ "array-bytes", "blake3", - "frame-metadata 16.0.0", + "frame-metadata 18.0.0", "parity-scale-codec", "scale-decode 0.13.1", "scale-info", @@ -11440,22 +11671,13 @@ dependencies = [ "memchr", ] -[[package]] -name = "oid-registry" -version = 
"0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bedf36ffb6ba96c2eb7144ef6270557b52e54b20c0a8e1eb2ff99a6c6959bff" -dependencies = [ - "asn1-rs 0.5.2", -] - [[package]] name = "oid-registry" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1c958dd45046245b9c3c2547369bb634eb461670b2e7e0de552905801a648d1d" dependencies = [ - "asn1-rs 0.6.1", + "asn1-rs", ] [[package]] @@ -11556,7 +11778,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7b1d40dd8f367db3c65bec8d3dd47d4a604ee8874480738f93191bddab4e0e0" dependencies = [ "expander", - "indexmap 2.2.3", + "indexmap 2.7.0", "itertools 0.11.0", "petgraph", "proc-macro-crate 3.1.0", @@ -14610,11 +14832,8 @@ dependencies = [ "hex", "hex-literal", "impl-trait-for-tuples", - "jsonrpsee", "log", - "pallet-assets 29.1.0", "pallet-balances 28.0.0", - "pallet-message-queue 31.0.0", "pallet-proxy 28.0.0", "pallet-revive-fixtures 0.1.0", "pallet-revive-proc-macro 0.1.0", @@ -14624,7 +14843,7 @@ dependencies = [ "pallet-utility 28.0.0", "parity-scale-codec", "paste", - "polkavm 0.13.0", + "polkavm 0.17.0", "pretty_assertions", "rlp 0.6.1", "scale-info", @@ -14720,12 +14939,10 @@ dependencies = [ "anyhow", "frame-system 28.0.0", "log", - "parity-wasm", - "polkavm-linker 0.14.0", + "polkavm-linker 0.17.1", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "tempfile", "toml 0.8.12", ] @@ -14842,7 +15059,7 @@ dependencies = [ "bitflags 1.3.2", "parity-scale-codec", "paste", - "polkavm-derive 0.14.0", + "polkavm-derive 0.17.0", "scale-info", ] @@ -15956,6 +16173,7 @@ dependencies = [ "bp-messages 0.7.0", "bp-runtime 0.7.0", "bp-xcm-bridge-hub 0.2.0", + "bp-xcm-bridge-hub-router 0.6.0", "frame-support 28.0.0", "frame-system 28.0.0", "log", @@ -16164,6 +16382,7 @@ dependencies = [ "pallet-session 28.0.0", "pallet-timestamp 27.0.0", "pallet-xcm 7.0.0", + "parachains-common 7.0.0", "parity-scale-codec", "polkadot-parachain-primitives 6.0.0", "sp-consensus-aura 0.32.0", @@ -16175,6 +16394,7 @@ dependencies = [ "staging-xcm 7.0.0", "staging-xcm-executor 7.0.0", "substrate-wasm-builder 17.0.0", + "xcm-runtime-apis 0.1.0", ] [[package]] @@ -16573,6 +16793,7 @@ dependencies = [ "pallet-xcm 7.0.0", "pallet-xcm-benchmarks 7.0.0", "parachains-common 7.0.0", + "parachains-runtimes-test-utils 7.0.0", "parity-scale-codec", "polkadot-parachain-primitives 6.0.0", "polkadot-runtime-common 7.0.0", @@ -16630,6 +16851,7 @@ dependencies = [ "sp-runtime 31.0.1", "staging-xcm 7.0.0", "staging-xcm-executor 7.0.0", + "westend-runtime", "westend-runtime-constants 7.0.0", "westend-system-emulated-network", ] @@ -16674,6 +16896,7 @@ dependencies = [ "pallet-xcm 7.0.0", "pallet-xcm-benchmarks 7.0.0", "parachains-common 7.0.0", + "parachains-runtimes-test-utils 7.0.0", "parity-scale-codec", "polkadot-parachain-primitives 6.0.0", "polkadot-runtime-common 7.0.0", @@ -16758,7 +16981,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" dependencies = [ "fixedbitset", - "indexmap 2.2.3", + "indexmap 2.7.0", ] [[package]] @@ -17087,7 +17310,7 @@ dependencies = [ "fatality", "futures", "futures-timer", - "indexmap 2.2.3", + "indexmap 2.7.0", "parity-scale-codec", "polkadot-erasure-coding", "polkadot-node-network-protocol", @@ -17553,6 +17776,7 @@ dependencies = [ "rococo-runtime", "rusty-fork", "sc-sysinfo", + "sc-tracing", "slotmap", "sp-core 28.0.0", 
"sp-maybe-compressed-blob 11.0.0", @@ -17882,6 +18106,7 @@ dependencies = [ "cumulus-primitives-aura 0.7.0", "cumulus-primitives-core 0.7.0", "cumulus-relay-chain-interface", + "cumulus-test-runtime", "docify", "frame-benchmarking 28.0.0", "frame-benchmarking-cli", @@ -17910,22 +18135,28 @@ dependencies = [ "sc-executor 0.32.0", "sc-network", "sc-rpc", + "sc-runtime-utilities", "sc-service", "sc-sysinfo", "sc-telemetry", "sc-tracing", "sc-transaction-pool", + "scale-info", "serde", "serde_json", "sp-api 26.0.0", "sp-block-builder 26.0.0", + "sp-consensus", "sp-consensus-aura 0.32.0", "sp-core 28.0.0", + "sp-crypto-hashing 0.1.0", "sp-genesis-builder 0.8.0", "sp-inherents 26.0.0", + "sp-io 30.0.0", "sp-keystore 0.34.0", "sp-runtime 31.0.1", "sp-session 27.0.0", + "sp-storage 19.0.0", "sp-timestamp 26.0.0", "sp-transaction-pool 26.0.0", "sp-version 29.0.0", @@ -17933,6 +18164,7 @@ dependencies = [ "substrate-frame-rpc-system", "substrate-prometheus-endpoint", "substrate-state-trie-migration-rpc", + "subxt-metadata", "tokio", "wait-timeout", ] @@ -18541,7 +18773,6 @@ dependencies = [ "pallet-remark 28.0.0", "pallet-revive 0.1.0", "pallet-revive-eth-rpc", - "pallet-revive-fixtures 0.1.0", "pallet-revive-mock-network 0.1.0", "pallet-revive-proc-macro 0.1.0", "pallet-revive-uapi 0.1.0", @@ -18671,6 +18902,7 @@ dependencies = [ "sc-rpc-api", "sc-rpc-server", "sc-rpc-spec-v2", + "sc-runtime-utilities", "sc-service", "sc-state-db", "sc-statement-store", @@ -19016,6 +19248,7 @@ version = "0.0.1" dependencies = [ "assert_cmd", "chain-spec-guide-runtime", + "cmd_lib", "cumulus-client-service", "cumulus-pallet-aura-ext 0.7.0", "cumulus-pallet-parachain-system 0.7.0", @@ -19333,7 +19566,7 @@ dependencies = [ "fatality", "futures", "futures-timer", - "indexmap 2.2.3", + "indexmap 2.7.0", "parity-scale-codec", "polkadot-node-network-protocol", "polkadot-node-primitives", @@ -19672,15 +19905,28 @@ dependencies = [ [[package]] name = "polkavm" -version = "0.13.0" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84979be196ba2855f73616413e7b1d18258128aa396b3dc23f520a00a807720e" +dependencies = [ + "libc", + "log", + "polkavm-assembler 0.17.0", + "polkavm-common 0.17.0", + "polkavm-linux-raw 0.17.0", +] + +[[package]] +name = "polkavm" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57e79a14b15ed38cb5b9a1e38d02e933f19e3d180ae5b325fed606c5e5b9177e" +checksum = "dd044ab1d3b11567ab6b98ca71259a992b4034220d5972988a0e96518e5d343d" dependencies = [ "libc", "log", - "polkavm-assembler 0.13.0", - "polkavm-common 0.13.0", - "polkavm-linux-raw 0.13.0", + "polkavm-assembler 0.18.0", + "polkavm-common 0.18.0", + "polkavm-linux-raw 0.18.0", ] [[package]] @@ -19703,9 +19949,18 @@ dependencies = [ [[package]] name = "polkavm-assembler" -version = "0.13.0" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ba7b434ff630b0f73a1560e8baea807246ca22098abe49f97821e0e2d2accc4" +dependencies = [ + "log", +] + +[[package]] +name = "polkavm-assembler" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e8da55465000feb0a61bbf556ed03024db58f3420eca37721fc726b3b2136bf" +checksum = "eaad38dc420bfed79e6f731471c973ce5ff5e47ab403e63cf40358fef8a6368f" dependencies = [ "log", ] @@ -19737,19 +19992,23 @@ dependencies = [ [[package]] name = "polkavm-common" -version = "0.13.0" +version = "0.17.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "084b4339aae7dfdaaa5aa7d634110afd95970e0737b6fb2a0cb10db8b56b753c" +checksum = "8f0dbafef4ab6ceecb4982ac3b550df430ef4f9fdbf07c108b7d4f91a0682fce" dependencies = [ "log", - "polkavm-assembler 0.13.0", + "polkavm-assembler 0.17.0", ] [[package]] name = "polkavm-common" -version = "0.14.0" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "711952a783e9c5ad407cdacb1ed147f36d37c5d43417c1091d86456d2999417b" +checksum = "31ff33982a807d8567645d4784b9b5d7ab87bcb494f534a57cadd9012688e102" +dependencies = [ + "log", + "polkavm-assembler 0.18.0", +] [[package]] name = "polkavm-derive" @@ -19780,11 +20039,20 @@ dependencies = [ [[package]] name = "polkavm-derive" -version = "0.14.0" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0c3dbb6c8c7bd3e5f5b05aa7fc9355acf14df7ce5d392911e77d01090a38d0d" +dependencies = [ + "polkavm-derive-impl-macro 0.17.0", +] + +[[package]] +name = "polkavm-derive" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4832a0aebf6cefc988bb7b2d74ea8c86c983164672e2fc96300f356a1babfc1" +checksum = "c2eb703f3b6404c13228402e98a5eae063fd16b8f58afe334073ec105ee4117e" dependencies = [ - "polkavm-derive-impl-macro 0.14.0", + "polkavm-derive-impl-macro 0.18.0", ] [[package]] @@ -19825,11 +20093,23 @@ dependencies = [ [[package]] name = "polkavm-derive-impl" -version = "0.14.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e339fc7c11310fe5adf711d9342278ac44a75c9784947937cce12bd4f30842f2" +checksum = "42565aed4adbc4034612d0b17dea8db3681fb1bd1aed040d6edc5455a9f478a1" dependencies = [ - "polkavm-common 0.14.0", + "polkavm-common 0.17.0", + "proc-macro2 1.0.86", + "quote 1.0.37", + "syn 2.0.87", +] + +[[package]] +name = "polkavm-derive-impl" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12d2840cc62a0550156b1676fed8392271ddf2fab4a00661db56231424674624" +dependencies = [ + "polkavm-common 0.18.0", "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.87", @@ -19867,11 +20147,21 @@ dependencies = [ [[package]] name = "polkavm-derive-impl-macro" -version = "0.14.0" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86d9838e95241b0bce4fe269cdd4af96464160505840ed5a8ac8536119ba19e2" +dependencies = [ + "polkavm-derive-impl 0.17.0", + "syn 2.0.87", +] + +[[package]] +name = "polkavm-derive-impl-macro" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b569754b15060d03000c09e3bf11509d527f60b75d79b4c30c3625b5071d9702" +checksum = "48c16669ddc7433e34c1007d31080b80901e3e8e523cb9d4b441c3910cf9294b" dependencies = [ - "polkavm-derive-impl 0.14.0", + "polkavm-derive-impl 0.18.0", "syn 2.0.87", ] @@ -19907,15 +20197,32 @@ dependencies = [ [[package]] name = "polkavm-linker" -version = "0.14.0" +version = "0.17.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0422ead3030d5cde69e2206dbc7d65da872b121876507cd5363f6c6e6aa45157" +dependencies = [ + "dirs", + "gimli 0.31.1", + "hashbrown 0.14.5", + "log", + "object 0.36.1", + "polkavm-common 0.17.0", + "regalloc2 0.9.3", + "rustc-demangle", +] + +[[package]] +name = "polkavm-linker" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0959ac3b0f4fd5caf5c245c637705f19493efe83dba31a83bbba928b93b0116a" +checksum = "e9bfe793b094d9ea5c99b7c43ba46e277b0f8f48f4bbfdbabf8d3ebf701a4bd3" dependencies = [ + "dirs", "gimli 0.31.1", "hashbrown 0.14.5", "log", "object 0.36.1", - "polkavm-common 0.14.0", + "polkavm-common 0.18.0", "regalloc2 0.9.3", "rustc-demangle", ] @@ -19934,9 +20241,15 @@ checksum = "26e45fa59c7e1bb12ef5289080601e9ec9b31435f6e32800a5c90c132453d126" [[package]] name = "polkavm-linux-raw" -version = "0.13.0" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e64c3d93a58ffbc3099d1227f0da9675a025a9ea6c917038f266920c1de1e568" + +[[package]] +name = "polkavm-linux-raw" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "686c4dd9c9c16cc22565b51bdbb269792318d0fd2e6b966b5f6c788534cad0e9" +checksum = "23eff02c070c70f31878a3d915e88a914ecf3e153741e2fb572dde28cce20fde" [[package]] name = "polling" @@ -19963,7 +20276,7 @@ dependencies = [ "cfg-if", "concurrent-queue", "pin-project-lite", - "rustix 0.38.25", + "rustix 0.38.21", "tracing", "windows-sys 0.52.0", ] @@ -20230,17 +20543,6 @@ version = "0.5.20+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" -[[package]] -name = "proc-macro-warning" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e" -dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", -] - [[package]] name = "proc-macro-warning" version = "1.0.0" @@ -20282,7 +20584,7 @@ dependencies = [ "hex", "lazy_static", "procfs-core", - "rustix 0.38.25", + "rustix 0.38.21", ] [[package]] @@ -20312,9 +20614,9 @@ dependencies = [ [[package]] name = "prometheus-client" -version = "0.21.2" +version = "0.22.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c99afa9a01501019ac3a14d71d9f94050346f55ca471ce90c799a15c58f61e2" +checksum = "504ee9ff529add891127c4827eb481bd69dc0ebc72e9a682e187db4caa60c3ca" dependencies = [ "dtoa", "itoa", @@ -20545,15 +20847,15 @@ dependencies = [ [[package]] name = "quick-protobuf-codec" -version = "0.2.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8ededb1cd78531627244d51dd0c7139fbe736c7d57af0092a76f0ffb2f56e98" +checksum = "15a0580ab32b169745d7a39db2ba969226ca16738931be152a3209b409de2474" dependencies = [ - "asynchronous-codec", + "asynchronous-codec 0.7.0", "bytes", "quick-protobuf 0.8.1", "thiserror", - "unsigned-varint 0.7.2", + "unsigned-varint 0.8.0", ] [[package]] @@ -20578,24 +20880,6 @@ dependencies = [ "rand", ] -[[package]] -name = "quinn" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cc2c5017e4b43d5995dcea317bc46c1e09404c0a9664d2908f7f02dfe943d75" -dependencies = [ - "bytes", - "futures-io", - "pin-project-lite", - "quinn-proto 0.10.6", - "quinn-udp 0.4.1", - "rustc-hash 1.1.0", - "rustls 0.21.7", - "thiserror", - "tokio", - "tracing", -] - [[package]] name = "quinn" version = "0.11.5" @@ -20603,34 +20887,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c7c5fdde3cdae7203427dc4f0a68fe0ed09833edc525a03456b153b79828684" dependencies = [ "bytes", + "futures-io", "pin-project-lite", - "quinn-proto 0.11.8", - "quinn-udp 0.5.4", + "quinn-proto", + "quinn-udp", 
"rustc-hash 2.0.0", - "rustls 0.23.14", + "rustls 0.23.18", "socket2 0.5.7", "thiserror", "tokio", "tracing", ] -[[package]] -name = "quinn-proto" -version = "0.10.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "141bf7dfde2fbc246bfd3fe12f2455aa24b0fbd9af535d8c86c7bd1381ff2b1a" -dependencies = [ - "bytes", - "rand", - "ring 0.16.20", - "rustc-hash 1.1.0", - "rustls 0.21.7", - "slab", - "thiserror", - "tinyvec", - "tracing", -] - [[package]] name = "quinn-proto" version = "0.11.8" @@ -20639,28 +20907,15 @@ checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6" dependencies = [ "bytes", "rand", - "ring 0.17.7", + "ring 0.17.8", "rustc-hash 2.0.0", - "rustls 0.23.14", + "rustls 0.23.18", "slab", "thiserror", "tinyvec", "tracing", ] -[[package]] -name = "quinn-udp" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "055b4e778e8feb9f93c4e439f71dc2156ef13360b432b799e179a8c4cdf0b1d7" -dependencies = [ - "bytes", - "libc", - "socket2 0.5.7", - "tracing", - "windows-sys 0.48.0", -] - [[package]] name = "quinn-udp" version = "0.5.4" @@ -20707,6 +20962,7 @@ dependencies = [ "libc", "rand_chacha", "rand_core 0.6.4", + "serde", ] [[package]] @@ -20829,6 +21085,18 @@ dependencies = [ "yasna", ] +[[package]] +name = "rcgen" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52c4f3084aa3bc7dfbba4eff4fab2a54db4324965d8872ab933565e6fbd83bc6" +dependencies = [ + "pem 3.0.4", + "ring 0.16.20", + "time", + "yasna", +] + [[package]] name = "redox_syscall" version = "0.2.16" @@ -20932,7 +21200,7 @@ checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.9", + "regex-automata 0.4.8", "regex-syntax 0.8.5", ] @@ -20953,9 +21221,9 @@ checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69" [[package]] name = "regex-automata" -version = "0.4.9" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" dependencies = [ "aho-corasick", "memchr", @@ -21127,8 +21395,8 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "quinn 0.11.5", - "rustls 0.23.14", + "quinn", + "rustls 0.23.18", "rustls-pemfile 2.0.0", "rustls-pki-types", "serde", @@ -21200,16 +21468,17 @@ dependencies = [ [[package]] name = "ring" -version = "0.17.7" +version = "0.17.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74" +checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", + "cfg-if", "getrandom", "libc", "spin 0.9.8", "untrusted 0.9.0", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -21682,14 +21951,14 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.25" +version = "0.38.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc99bc2d4f1fed22595588a013687477aedf3cdcfb26558c559edb67b4d9b22e" +checksum = "2b426b0506e5d50a7d8dafcf2e81471400deb602392c7dd110815afb4eaf02a3" dependencies = [ "bitflags 2.6.0", "errno", "libc", - "linux-raw-sys 0.4.11", + "linux-raw-sys 0.4.10", "windows-sys 0.48.0", ] @@ -21723,7 +21992,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" dependencies = [ "log", - "ring 0.17.7", + "ring 0.17.8", "rustls-pki-types", "rustls-webpki 0.102.8", "subtle 2.5.0", @@ -21732,13 +22001,13 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.14" +version = "0.23.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "415d9944693cb90382053259f89fbb077ea730ad7273047ec63b19bc9b160ba8" +checksum = "9c9cc1d47e243d655ace55ed38201c19ae02c148ae56412ab8750e8f0166ab7f" dependencies = [ "log", "once_cell", - "ring 0.17.7", + "ring 0.17.8", "rustls-pki-types", "rustls-webpki 0.102.8", "subtle 2.5.0", @@ -21804,9 +22073,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e696e35370c65c9c541198af4543ccd580cf17fc25d8e05c5a242b202488c55" +checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" [[package]] name = "rustls-platform-verifier" @@ -21819,7 +22088,7 @@ dependencies = [ "jni", "log", "once_cell", - "rustls 0.23.14", + "rustls 0.23.18", "rustls-native-certs 0.7.0", "rustls-platform-verifier-android", "rustls-webpki 0.102.8", @@ -21851,7 +22120,7 @@ version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ - "ring 0.17.7", + "ring 0.17.8", "rustls-pki-types", "untrusted 0.9.0", ] @@ -22662,7 +22931,7 @@ dependencies = [ name = "sc-executor-common" version = "0.29.0" dependencies = [ - "polkavm 0.9.3", + "polkavm 0.18.0", "sc-allocator 23.0.0", "sp-maybe-compressed-blob 11.0.0", "sp-wasm-interface 20.0.0", @@ -22703,7 +22972,7 @@ name = "sc-executor-polkavm" version = "0.29.0" dependencies = [ "log", - "polkavm 0.9.3", + "polkavm 0.18.0", "sc-executor-common 0.29.0", "sp-wasm-interface 20.0.0", ] @@ -22860,7 +23129,7 @@ dependencies = [ "assert_matches", "async-channel 1.9.0", "async-trait", - "asynchronous-codec", + "asynchronous-codec 0.6.2", "bytes", "cid 0.9.0", "criterion", @@ -22869,7 +23138,7 @@ dependencies = [ "futures", "futures-timer", "ip_network", - "libp2p", + "libp2p 0.54.1", "linked_hash_set", "litep2p", "log", @@ -23045,7 +23314,7 @@ dependencies = [ "async-trait", "futures", "futures-timer", - "libp2p", + "libp2p 0.54.1", "log", "parking_lot 0.12.3", "rand", @@ -23126,7 +23395,7 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.12.3", "rand", - "rustls 0.23.14", + "rustls 0.23.18", "sc-block-builder", "sc-client-api", "sc-client-db", @@ -23250,6 +23519,7 @@ dependencies = [ "futures", "futures-util", "hex", + "itertools 0.11.0", "jsonrpsee", "log", "parity-scale-codec", @@ -23295,6 +23565,25 @@ dependencies = [ "substrate-wasm-builder 17.0.0", ] +[[package]] +name = "sc-runtime-utilities" +version = "0.1.0" +dependencies = [ + "cumulus-primitives-proof-size-hostfunction 0.2.0", + "cumulus-test-runtime", + "parity-scale-codec", + "sc-executor 0.32.0", + "sc-executor-common 0.29.0", + "sp-core 28.0.0", + "sp-crypto-hashing 0.1.0", + "sp-io 30.0.0", + "sp-state-machine 0.35.0", + "sp-version 29.0.0", + "sp-wasm-interface 20.0.0", + "subxt", + "thiserror", +] + [[package]] name = "sc-service" version = "0.35.0" @@ -23483,7 +23772,7 @@ version = "15.0.0" dependencies = [ "chrono", "futures", - "libp2p", + "libp2p 0.54.1", "log", "parking_lot 0.12.3", "pin-project", @@ -23545,7 
+23834,7 @@ dependencies = [ "criterion", "futures", "futures-timer", - "indexmap 2.2.3", + "indexmap 2.7.0", "itertools 0.11.0", "linked-hash-map", "log", @@ -23685,9 +23974,9 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.11.5" +version = "2.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aa7ffc1c0ef49b0452c6e2986abf2b07743320641ffd5fc63d552458e3b779b" +checksum = "346a3b32eba2640d17a9cb5927056b08f3de90f65b72fe09402c2ad07d684d0b" dependencies = [ "bitvec", "cfg-if", @@ -23699,9 +23988,9 @@ dependencies = [ [[package]] name = "scale-info-derive" -version = "2.11.5" +version = "2.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46385cc24172cf615450267463f937c10072516359b3ff1cb24228a4a08bf951" +checksum = "c6630024bf739e2179b91fb424b28898baf819414262c5d376677dbff1fe7ebf" dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2 1.0.86", @@ -24033,12 +24322,6 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" -[[package]] -name = "send_wrapper" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" - [[package]] name = "separator" version = "0.4.1" @@ -24119,7 +24402,7 @@ version = "1.0.132" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" dependencies = [ - "indexmap 2.2.3", + "indexmap 2.7.0", "itoa", "memchr", "ryu", @@ -24153,7 +24436,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.2.3", + "indexmap 2.7.0", "itoa", "ryu", "serde", @@ -24240,6 +24523,16 @@ dependencies = [ "keccak", ] +[[package]] +name = "sha3-asm" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28efc5e327c837aa837c59eae585fc250715ef939ac32881bcc11677cd02d46" +dependencies = [ + "cc", + "cfg-if", +] + [[package]] name = "sharded-slab" version = "0.1.4" @@ -24632,7 +24925,7 @@ dependencies = [ "chacha20poly1305", "curve25519-dalek 4.1.3", "rand_core 0.6.4", - "ring 0.17.7", + "ring 0.17.8", "rustc_version 0.4.0", "sha2 0.10.8", "subtle 2.5.0", @@ -24932,8 +25225,7 @@ dependencies = [ name = "snowbridge-pallet-inbound-queue" version = "0.2.0" dependencies = [ - "alloy-primitives", - "alloy-sol-types", + "alloy-core", "frame-benchmarking 28.0.0", "frame-support 28.0.0", "frame-system 28.0.0", @@ -24963,8 +25255,8 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2e6a9d00e60e3744e6b6f0c21fea6694b9c6401ac40e41340a96e561dcf1935" dependencies = [ - "alloy-primitives", - "alloy-sol-types", + "alloy-primitives 0.4.2", + "alloy-sol-types 0.4.2", "frame-benchmarking 38.0.0", "frame-support 38.0.0", "frame-system 38.0.0", @@ -25518,6 +25810,7 @@ dependencies = [ "sp-api 26.0.0", "sp-consensus", "sp-core 28.0.0", + "sp-metadata-ir 0.6.0", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", "sp-tracing 16.0.0", @@ -26204,7 +26497,7 @@ dependencies = [ [[package]] name = "sp-crypto-ec-utils" version = "0.4.1" -source = "git+https://github.com/paritytech/polkadot-sdk#838a534da874cf6071fba1df07643c6c5b033ae0" +source = 
"git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" dependencies = [ "ark-bls12-377", "ark-bls12-377-ext", @@ -26463,7 +26756,7 @@ dependencies = [ "libsecp256k1", "log", "parity-scale-codec", - "polkavm-derive 0.9.1", + "polkavm-derive 0.18.0", "rustversion", "secp256k1 0.28.2", "sp-core 28.0.0", @@ -26649,7 +26942,7 @@ dependencies = [ name = "sp-metadata-ir" version = "0.6.0" dependencies = [ - "frame-metadata 16.0.0", + "frame-metadata 18.0.0", "parity-scale-codec", "scale-info", ] @@ -26947,7 +27240,7 @@ dependencies = [ "bytes", "impl-trait-for-tuples", "parity-scale-codec", - "polkavm-derive 0.9.1", + "polkavm-derive 0.18.0", "primitive-types 0.13.1", "rustversion", "sp-core 28.0.0", @@ -27027,7 +27320,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "11.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#838a534da874cf6071fba1df07643c6c5b033ae0" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" dependencies = [ "Inflector", "proc-macro-crate 1.3.1", @@ -27652,7 +27945,7 @@ name = "sp-version-proc-macro" version = "13.0.0" dependencies = [ "parity-scale-codec", - "proc-macro-warning 1.0.0", + "proc-macro-warning", "proc-macro2 1.0.86", "quote 1.0.37", "sp-version 29.0.0", @@ -28586,12 +28879,12 @@ dependencies = [ "cargo_metadata", "console", "filetime", - "frame-metadata 16.0.0", + "frame-metadata 18.0.0", "jobserver", "merkleized-metadata", "parity-scale-codec", "parity-wasm", - "polkavm-linker 0.9.2", + "polkavm-linker 0.18.0", "sc-executor 0.32.0", "shlex", "sp-core 28.0.0", @@ -28978,6 +29271,18 @@ dependencies = [ "syn 2.0.87", ] +[[package]] +name = "syn-solidity" +version = "0.8.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "219389c1ebe89f8333df8bdfb871f6631c552ff399c23cac02480b6088aad8f0" +dependencies = [ + "paste", + "proc-macro2 1.0.86", + "quote 1.0.37", + "syn 2.0.87", +] + [[package]] name = "sync_wrapper" version = "1.0.1" @@ -29075,7 +29380,7 @@ dependencies = [ "cfg-if", "fastrand 2.1.0", "redox_syscall 0.4.1", - "rustix 0.38.25", + "rustix 0.38.21", "windows-sys 0.48.0", ] @@ -29105,7 +29410,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" dependencies = [ - "rustix 0.38.25", + "rustix 0.38.21", "windows-sys 0.48.0", ] @@ -29527,7 +29832,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.14", + "rustls 0.23.18", "rustls-pki-types", "tokio", ] @@ -29635,7 +29940,7 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.2.3", + "indexmap 2.7.0", "serde", "serde_spanned", "toml_datetime", @@ -29648,7 +29953,7 @@ version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d34d383cd00a163b4a5b85053df514d45bc330f6de7737edfe0a93311d1eaa03" dependencies = [ - "indexmap 2.2.3", + "indexmap 2.7.0", "toml_datetime", "winnow 0.5.15", ] @@ -29659,7 +29964,7 @@ version = "0.22.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3328d4f68a705b2a4498da1d580585d39a6510f98318a2cec3018a7ec61ddef" dependencies = [ - 
"indexmap 2.2.3", + "indexmap 2.7.0", "serde", "serde_spanned", "toml_datetime", @@ -29919,78 +30224,6 @@ dependencies = [ "keccak-hasher", ] -[[package]] -name = "trust-dns-proto" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f7f83d1e4a0e4358ac54c5c3681e5d7da5efc5a7a632c90bb6d6669ddd9bc26" -dependencies = [ - "async-trait", - "cfg-if", - "data-encoding", - "enum-as-inner 0.5.1", - "futures-channel", - "futures-io", - "futures-util", - "idna 0.2.3", - "ipnet", - "lazy_static", - "rand", - "smallvec", - "socket2 0.4.9", - "thiserror", - "tinyvec", - "tokio", - "tracing", - "url", -] - -[[package]] -name = "trust-dns-proto" -version = "0.23.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3119112651c157f4488931a01e586aa459736e9d6046d3bd9105ffb69352d374" -dependencies = [ - "async-trait", - "cfg-if", - "data-encoding", - "enum-as-inner 0.6.0", - "futures-channel", - "futures-io", - "futures-util", - "idna 0.4.0", - "ipnet", - "once_cell", - "rand", - "smallvec", - "thiserror", - "tinyvec", - "tokio", - "tracing", - "url", -] - -[[package]] -name = "trust-dns-resolver" -version = "0.23.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10a3e6c3aff1718b3c73e395d1f35202ba2ffa847c6a62eea0db8fb4cfe30be6" -dependencies = [ - "cfg-if", - "futures-util", - "ipconfig", - "lru-cache", - "once_cell", - "parking_lot 0.12.3", - "rand", - "resolv-conf", - "smallvec", - "thiserror", - "tokio", - "tracing", - "trust-dns-proto 0.23.2", -] - [[package]] name = "try-lock" version = "0.2.4" @@ -30200,7 +30433,7 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105" dependencies = [ - "asynchronous-codec", + "asynchronous-codec 0.6.2", "bytes", "futures-io", "futures-util", @@ -30238,7 +30471,7 @@ dependencies = [ "flate2", "log", "once_cell", - "rustls 0.23.14", + "rustls 0.23.18", "rustls-pki-types", "serde", "serde_json", @@ -30949,7 +31182,7 @@ version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed63aea5ce73d0ff405984102c42de94fc55a6b75765d621c65262469b3c9b53" dependencies = [ - "ring 0.17.7", + "ring 0.17.8", "untrusted 0.9.0", ] @@ -31515,35 +31748,18 @@ dependencies = [ "zeroize", ] -[[package]] -name = "x509-parser" -version = "0.15.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7069fba5b66b9193bd2c5d3d4ff12b839118f6bcbef5328efafafb5395cf63da" -dependencies = [ - "asn1-rs 0.5.2", - "data-encoding", - "der-parser 8.2.0", - "lazy_static", - "nom", - "oid-registry 0.6.1", - "rusticata-macros", - "thiserror", - "time", -] - [[package]] name = "x509-parser" version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcbc162f30700d6f3f82a24bf7cc62ffe7caea42c0b2cba8bf7f3ae50cf51f69" dependencies = [ - "asn1-rs 0.6.1", + "asn1-rs", "data-encoding", - "der-parser 9.0.0", + "der-parser", "lazy_static", "nom", - "oid-registry 0.7.0", + "oid-registry", "rusticata-macros", "thiserror", "time", @@ -31621,10 +31837,13 @@ name = "xcm-executor-integration-tests" version = "1.0.0" dependencies = [ "frame-support 28.0.0", + "frame-system 28.0.0", "futures", + "pallet-sudo 28.0.0", "pallet-transaction-payment 28.0.0", "pallet-xcm 7.0.0", "parity-scale-codec", + "polkadot-runtime-parachains 7.0.0", "polkadot-test-client", "polkadot-test-runtime", 
"polkadot-test-service", @@ -31643,6 +31862,7 @@ name = "xcm-procedural" version = "7.0.0" dependencies = [ "Inflector", + "frame-support 28.0.0", "proc-macro2 1.0.86", "quote 1.0.37", "staging-xcm 7.0.0", @@ -31829,6 +32049,22 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "yamux" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a31b5e376a8b012bee9c423acdbb835fc34d45001cfa3106236a624e4b738028" +dependencies = [ + "futures", + "log", + "nohash-hasher", + "parking_lot 0.12.3", + "pin-project", + "rand", + "static_assertions", + "web-time", +] + [[package]] name = "yansi" version = "0.5.1" @@ -31908,9 +32144,9 @@ dependencies = [ [[package]] name = "zombienet-configuration" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d7a8cc4f8e8bb3f40757b62d3b054da5c95f43321c775eb321edc89d431583e" +checksum = "8ad4fc5b0f1aa54de6bf2d6771c449b41cad47e1cf30559af0a71452686b47ab" dependencies = [ "anyhow", "lazy_static", @@ -31928,16 +32164,16 @@ dependencies = [ [[package]] name = "zombienet-orchestrator" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d32fa87851f41443a78971bd7110274f9a66d139ac834de159adc08f90cf8e3" +checksum = "e4a7dd25842ded75c7f4dc4f38f05fef567bd0b37fd3057c223d4ee34d8fa817" dependencies = [ "anyhow", "async-trait", "futures", "glob-match", "hex", - "libp2p", + "libp2p 0.52.4", "libsecp256k1", "multiaddr 0.18.1", "rand", @@ -31961,9 +32197,9 @@ dependencies = [ [[package]] name = "zombienet-prom-metrics-parser" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9acb9c94bc7c2c83f8eb8e26ed403f757af1632f22b89394d8876412ede990ca" +checksum = "a63e0c6024dd19b0f8b28afa94f78c211e5c163350ecda4a48084532d74d7cfe" dependencies = [ "pest", "pest_derive", @@ -31972,9 +32208,9 @@ dependencies = [ [[package]] name = "zombienet-provider" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc8f3f71d4d974fc4a2262fa9293c2eedc423540378bd7c1dc1b66cc95d1d1af" +checksum = "8d87c29390a342d0f4f62b6796861fb82e0e56c49929a272b689e8dbf24eaab9" dependencies = [ "anyhow", "async-trait", @@ -32003,9 +32239,9 @@ dependencies = [ [[package]] name = "zombienet-sdk" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dbfddce7a6100cdc930b93301f1b6381e6577ecc013d6802258ea6902a2bebd" +checksum = "829e5111182caf00ba57cd63656cf0bde6ce6add7f6a9747d15821c202a3f27e" dependencies = [ "async-trait", "futures", @@ -32020,9 +32256,9 @@ dependencies = [ [[package]] name = "zombienet-support" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d20567c52b4fd46b600cda254dedb6a6dc30cabf512de91e4f6f78f0f7f4644b" +checksum = "99568384a1d9645458ab9de377b3517cb543a1ece5aba905aeb58d269139df4e" dependencies = [ "anyhow", "async-trait", diff --git a/Cargo.toml b/Cargo.toml index 533ea4c9e878..37765056196e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -293,6 +293,7 @@ members = [ "substrate/client/rpc-api", "substrate/client/rpc-servers", "substrate/client/rpc-spec-v2", + "substrate/client/runtime-utilities", "substrate/client/service", "substrate/client/service/test", "substrate/client/state-db", @@ -594,8 +595,7 @@ zero-prefixed-literal = { level = 
"allow", priority = 2 } # 00_1000_0 Inflector = { version = "0.11.4" } aes-gcm = { version = "0.10" } ahash = { version = "0.8.2" } -alloy-primitives = { version = "0.4.2", default-features = false } -alloy-sol-types = { version = "0.4.2", default-features = false } +alloy-core = { version = "0.8.15", default-features = false } always-assert = { version = "0.1" } anyhow = { version = "1.0.81", default-features = false } approx = { version = "0.5.1" } @@ -779,7 +779,7 @@ frame-benchmarking-pallet-pov = { default-features = false, path = "substrate/fr frame-election-provider-solution-type = { path = "substrate/frame/election-provider-support/solution-type", default-features = false } frame-election-provider-support = { path = "substrate/frame/election-provider-support", default-features = false } frame-executive = { path = "substrate/frame/executive", default-features = false } -frame-metadata = { version = "16.0.0", default-features = false } +frame-metadata = { version = "18.0.0", default-features = false } frame-metadata-hash-extension = { path = "substrate/frame/metadata-hash-extension", default-features = false } frame-support = { path = "substrate/frame/support", default-features = false } frame-support-procedural = { path = "substrate/frame/support/procedural", default-features = false } @@ -841,20 +841,20 @@ kvdb-shared-tests = { version = "0.11.0" } landlock = { version = "0.3.0" } libc = { version = "0.2.155" } libfuzzer-sys = { version = "0.4" } -libp2p = { version = "0.52.4" } +libp2p = { version = "0.54.1" } libp2p-identity = { version = "0.2.9" } libsecp256k1 = { version = "0.7.0", default-features = false } linked-hash-map = { version = "0.5.4" } linked_hash_set = { version = "0.1.4" } linregress = { version = "0.5.1" } lite-json = { version = "0.2.0", default-features = false } -litep2p = { version = "0.8.1", features = ["websocket"] } +litep2p = { version = "0.8.4", features = ["websocket"] } log = { version = "0.4.22", default-features = false } macro_magic = { version = "0.5.1" } maplit = { version = "1.0.2" } memmap2 = { version = "0.9.3" } memory-db = { version = "0.32.0", default-features = false } -merkleized-metadata = { version = "0.1.0" } +merkleized-metadata = { version = "0.2.0" } merlin = { version = "3.0", default-features = false } messages-relay = { path = "bridges/relays/messages" } metered = { version = "0.6.1", default-features = false, package = "prioritized-metered-channel" } @@ -1089,9 +1089,9 @@ polkadot-subsystem-bench = { path = "polkadot/node/subsystem-bench" } polkadot-test-client = { path = "polkadot/node/test/client" } polkadot-test-runtime = { path = "polkadot/runtime/test-runtime" } polkadot-test-service = { path = "polkadot/node/test/service" } -polkavm = { version = "0.9.3", default-features = false } -polkavm-derive = "0.9.1" -polkavm-linker = "0.9.2" +polkavm = { version = "0.18.0", default-features = false } +polkavm-derive = "0.18.0" +polkavm-linker = "0.18.0" portpicker = { version = "0.1.1" } pretty_assertions = { version = "1.3.0" } primitive-types = { version = "0.13.1", default-features = false, features = [ @@ -1136,7 +1136,7 @@ rstest = { version = "0.18.2" } rustc-hash = { version = "1.1.0" } rustc-hex = { version = "2.1.0", default-features = false } rustix = { version = "0.36.7", default-features = false } -rustls = { version = "0.23.14", default-features = false, features = ["logging", "ring", "std", "tls12"] } +rustls = { version = "0.23.18", default-features = false, features = ["logging", "ring", "std", "tls12"] } rustversion 
= { version = "1.0.17" } rusty-fork = { version = "0.3.0", default-features = false } safe-mix = { version = "1.0", default-features = false } @@ -1184,6 +1184,7 @@ sc-rpc-api = { path = "substrate/client/rpc-api", default-features = false } sc-rpc-server = { path = "substrate/client/rpc-servers", default-features = false } sc-rpc-spec-v2 = { path = "substrate/client/rpc-spec-v2", default-features = false } sc-runtime-test = { path = "substrate/client/executor/runtime-test" } +sc-runtime-utilities = { path = "substrate/client/runtime-utilities", default-features = true } sc-service = { path = "substrate/client/service", default-features = false } sc-service-test = { path = "substrate/client/service/test" } sc-state-db = { path = "substrate/client/state-db", default-features = false } @@ -1197,7 +1198,7 @@ sc-tracing-proc-macro = { path = "substrate/client/tracing/proc-macro", default- sc-transaction-pool = { path = "substrate/client/transaction-pool", default-features = false } sc-transaction-pool-api = { path = "substrate/client/transaction-pool/api", default-features = false } sc-utils = { path = "substrate/client/utils", default-features = false } -scale-info = { version = "2.11.1", default-features = false } +scale-info = { version = "2.11.6", default-features = false } schemars = { version = "0.8.13", default-features = false } schnellru = { version = "0.2.3" } schnorrkel = { version = "0.11.4", default-features = false } @@ -1317,6 +1318,7 @@ substrate-test-runtime-transaction-pool = { path = "substrate/test-utils/runtime substrate-test-utils = { path = "substrate/test-utils" } substrate-wasm-builder = { path = "substrate/utils/wasm-builder", default-features = false } subxt = { version = "0.38", default-features = false } +subxt-metadata = { version = "0.38.0", default-features = false } subxt-signer = { version = "0.38" } syn = { version = "2.0.87" } sysinfo = { version = "0.30" } @@ -1387,7 +1389,7 @@ xcm-procedural = { path = "polkadot/xcm/procedural", default-features = false } xcm-runtime-apis = { path = "polkadot/xcm/xcm-runtime-apis", default-features = false } xcm-simulator = { path = "polkadot/xcm/xcm-simulator", default-features = false } zeroize = { version = "1.7.0", default-features = false } -zombienet-sdk = { version = "0.2.15" } +zombienet-sdk = { version = "0.2.16" } zstd = { version = "0.12.4", default-features = false } [profile.release] diff --git a/bridges/bin/runtime-common/Cargo.toml b/bridges/bin/runtime-common/Cargo.toml index 37b56140c289..49cd086fd3eb 100644 --- a/bridges/bin/runtime-common/Cargo.toml +++ b/bridges/bin/runtime-common/Cargo.toml @@ -99,6 +99,7 @@ runtime-benchmarks = [ "pallet-utility/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "sp-trie", + "xcm/runtime-benchmarks", ] integrity-test = ["static_assertions"] test-helpers = ["bp-runtime/test-helpers", "sp-trie"] diff --git a/bridges/bin/runtime-common/src/integrity.rs b/bridges/bin/runtime-common/src/integrity.rs index 2ff6c4c9165a..535f1a26e5e8 100644 --- a/bridges/bin/runtime-common/src/integrity.rs +++ b/bridges/bin/runtime-common/src/integrity.rs @@ -89,13 +89,11 @@ macro_rules! assert_bridge_messages_pallet_types( /// Macro that combines four other macro calls - `assert_chain_types`, `assert_bridge_types`, /// and `assert_bridge_messages_pallet_types`. It may be used -/// at the chain that is implementing complete standard messages bridge (i.e. with bridge GRANDPA -/// and messages pallets deployed). 
+/// at the chain that is implementing standard messages bridge with messages pallets deployed. #[macro_export] macro_rules! assert_complete_bridge_types( ( runtime: $r:path, - with_bridged_chain_grandpa_instance: $gi:path, with_bridged_chain_messages_instance: $mi:path, this_chain: $this:path, bridged_chain: $bridged:path, @@ -186,34 +184,55 @@ where ); } -/// Parameters for asserting bridge pallet names. +/// Parameters for asserting bridge GRANDPA pallet names. #[derive(Debug)] -pub struct AssertBridgePalletNames<'a> { +struct AssertBridgeGrandpaPalletNames<'a> { /// Name of the GRANDPA pallet, deployed at this chain and used to bridge with the bridged /// chain. pub with_bridged_chain_grandpa_pallet_name: &'a str, - /// Name of the messages pallet, deployed at this chain and used to bridge with the bridged - /// chain. - pub with_bridged_chain_messages_pallet_name: &'a str, } /// Tests that bridge pallet names used in `construct_runtime!()` macro call are matching constants /// from chain primitives crates. -fn assert_bridge_pallet_names(params: AssertBridgePalletNames) +fn assert_bridge_grandpa_pallet_names(params: AssertBridgeGrandpaPalletNames) where - R: pallet_bridge_grandpa::Config + pallet_bridge_messages::Config, + R: pallet_bridge_grandpa::Config, GI: 'static, - MI: 'static, { // check that the bridge GRANDPA pallet has required name assert_eq!( - pallet_bridge_grandpa::PalletOwner::::storage_value_final_key().to_vec(), + pallet_bridge_grandpa::PalletOwner::::storage_value_final_key().to_vec(), + bp_runtime::storage_value_key( + params.with_bridged_chain_grandpa_pallet_name, + "PalletOwner", + ) + .0, + ); + assert_eq!( + pallet_bridge_grandpa::PalletOperatingMode::::storage_value_final_key().to_vec(), bp_runtime::storage_value_key( params.with_bridged_chain_grandpa_pallet_name, - "PalletOwner", - ).0, + "PalletOperatingMode", + ) + .0, ); +} +/// Parameters for asserting bridge messages pallet names. +#[derive(Debug)] +struct AssertBridgeMessagesPalletNames<'a> { + /// Name of the messages pallet, deployed at this chain and used to bridge with the bridged + /// chain. + pub with_bridged_chain_messages_pallet_name: &'a str, +} + +/// Tests that bridge pallet names used in `construct_runtime!()` macro call are matching constants +/// from chain primitives crates. +fn assert_bridge_messages_pallet_names(params: AssertBridgeMessagesPalletNames) +where + R: pallet_bridge_messages::Config, + MI: 'static, +{ // check that the bridge messages pallet has required name assert_eq!( pallet_bridge_messages::PalletOwner::::storage_value_final_key().to_vec(), @@ -223,6 +242,14 @@ where ) .0, ); + assert_eq!( + pallet_bridge_messages::PalletOperatingMode::::storage_value_final_key().to_vec(), + bp_runtime::storage_value_key( + params.with_bridged_chain_messages_pallet_name, + "PalletOperatingMode", + ) + .0, + ); } /// Parameters for asserting complete standard messages bridge. 
@@ -246,9 +273,11 @@ pub fn assert_complete_with_relay_chain_bridge_constants( assert_chain_constants::(params.this_chain_constants); assert_bridge_grandpa_pallet_constants::(); assert_bridge_messages_pallet_constants::(); - assert_bridge_pallet_names::(AssertBridgePalletNames { + assert_bridge_grandpa_pallet_names::(AssertBridgeGrandpaPalletNames { with_bridged_chain_grandpa_pallet_name: >::BridgedChain::WITH_CHAIN_GRANDPA_PALLET_NAME, + }); + assert_bridge_messages_pallet_names::(AssertBridgeMessagesPalletNames { with_bridged_chain_messages_pallet_name: >::BridgedChain::WITH_CHAIN_MESSAGES_PALLET_NAME, }); @@ -256,21 +285,43 @@ pub fn assert_complete_with_relay_chain_bridge_constants( /// All bridge-related constants tests for the complete standard parachain messages bridge /// (i.e. with bridge GRANDPA, parachains and messages pallets deployed). -pub fn assert_complete_with_parachain_bridge_constants( +pub fn assert_complete_with_parachain_bridge_constants( params: AssertCompleteBridgeConstants, ) where R: frame_system::Config - + pallet_bridge_grandpa::Config + + pallet_bridge_parachains::Config + pallet_bridge_messages::Config, - GI: 'static, + >::BridgedRelayChain: ChainWithGrandpa, + PI: 'static, + MI: 'static, +{ + assert_chain_constants::(params.this_chain_constants); + assert_bridge_grandpa_pallet_constants::(); + assert_bridge_messages_pallet_constants::(); + assert_bridge_grandpa_pallet_names::( + AssertBridgeGrandpaPalletNames { + with_bridged_chain_grandpa_pallet_name: + <>::BridgedRelayChain>::WITH_CHAIN_GRANDPA_PALLET_NAME, + }, + ); + assert_bridge_messages_pallet_names::(AssertBridgeMessagesPalletNames { + with_bridged_chain_messages_pallet_name: + >::BridgedChain::WITH_CHAIN_MESSAGES_PALLET_NAME, + }); +} + +/// All bridge-related constants tests for the standalone messages bridge deployment (only with +/// messages pallets deployed). 
+pub fn assert_standalone_messages_bridge_constants(params: AssertCompleteBridgeConstants) +where + R: frame_system::Config + pallet_bridge_messages::Config, MI: 'static, - RelayChain: ChainWithGrandpa, { assert_chain_constants::(params.this_chain_constants); - assert_bridge_grandpa_pallet_constants::(); assert_bridge_messages_pallet_constants::(); - assert_bridge_pallet_names::(AssertBridgePalletNames { - with_bridged_chain_grandpa_pallet_name: RelayChain::WITH_CHAIN_GRANDPA_PALLET_NAME, + assert_bridge_messages_pallet_names::(AssertBridgeMessagesPalletNames { with_bridged_chain_messages_pallet_name: >::BridgedChain::WITH_CHAIN_MESSAGES_PALLET_NAME, }); diff --git a/bridges/bin/runtime-common/src/mock.rs b/bridges/bin/runtime-common/src/mock.rs index 6cf04b452da7..88037d9deff5 100644 --- a/bridges/bin/runtime-common/src/mock.rs +++ b/bridges/bin/runtime-common/src/mock.rs @@ -196,6 +196,7 @@ impl pallet_bridge_messages::Config for TestRuntime { type DeliveryConfirmationPayments = pallet_bridge_relayers::DeliveryConfirmationPaymentsAdapter< TestRuntime, (), + (), ConstU64<100_000>, >; type OnMessagesDelivered = (); diff --git a/bridges/chains/chain-asset-hub-rococo/Cargo.toml b/bridges/chains/chain-asset-hub-rococo/Cargo.toml index 363a869048aa..4eb93ab52bc9 100644 --- a/bridges/chains/chain-asset-hub-rococo/Cargo.toml +++ b/bridges/chains/chain-asset-hub-rococo/Cargo.toml @@ -19,10 +19,14 @@ scale-info = { features = ["derive"], workspace = true } # Substrate Dependencies frame-support = { workspace = true } +sp-core = { workspace = true } # Bridge Dependencies bp-xcm-bridge-hub-router = { workspace = true } +# Polkadot dependencies +xcm = { workspace = true } + [features] default = ["std"] std = [ @@ -30,4 +34,6 @@ std = [ "codec/std", "frame-support/std", "scale-info/std", + "sp-core/std", + "xcm/std", ] diff --git a/bridges/chains/chain-asset-hub-rococo/src/lib.rs b/bridges/chains/chain-asset-hub-rococo/src/lib.rs index de2e9ae856d1..4ff7b391acd0 100644 --- a/bridges/chains/chain-asset-hub-rococo/src/lib.rs +++ b/bridges/chains/chain-asset-hub-rococo/src/lib.rs @@ -18,10 +18,13 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + use codec::{Decode, Encode}; use scale_info::TypeInfo; pub use bp_xcm_bridge_hub_router::XcmBridgeHubRouterCall; +use xcm::latest::prelude::*; /// `AssetHubRococo` Runtime `Call` enum. /// @@ -44,5 +47,27 @@ frame_support::parameter_types! { pub const XcmBridgeHubRouterTransactCallMaxWeight: frame_support::weights::Weight = frame_support::weights::Weight::from_parts(200_000_000, 6144); } +/// Builds an (un)congestion XCM program with the `report_bridge_status` call for +/// `ToWestendXcmRouter`. +pub fn build_congestion_message( + bridge_id: sp_core::H256, + is_congested: bool, +) -> alloc::vec::Vec> { + alloc::vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { + origin_kind: OriginKind::Xcm, + fallback_max_weight: Some(XcmBridgeHubRouterTransactCallMaxWeight::get()), + call: Call::ToWestendXcmRouter(XcmBridgeHubRouterCall::report_bridge_status { + bridge_id, + is_congested, + }) + .encode() + .into(), + }, + ExpectTransactStatus(MaybeErrorCode::Success), + ] +} + /// Identifier of AssetHubRococo in the Rococo relay chain. 
pub const ASSET_HUB_ROCOCO_PARACHAIN_ID: u32 = 1000; diff --git a/bridges/chains/chain-asset-hub-westend/Cargo.toml b/bridges/chains/chain-asset-hub-westend/Cargo.toml index 430d9b6116cf..22071399f4d1 100644 --- a/bridges/chains/chain-asset-hub-westend/Cargo.toml +++ b/bridges/chains/chain-asset-hub-westend/Cargo.toml @@ -19,10 +19,14 @@ scale-info = { features = ["derive"], workspace = true } # Substrate Dependencies frame-support = { workspace = true } +sp-core = { workspace = true } # Bridge Dependencies bp-xcm-bridge-hub-router = { workspace = true } +# Polkadot dependencies +xcm = { workspace = true } + [features] default = ["std"] std = [ @@ -30,4 +34,6 @@ std = [ "codec/std", "frame-support/std", "scale-info/std", + "sp-core/std", + "xcm/std", ] diff --git a/bridges/chains/chain-asset-hub-westend/src/lib.rs b/bridges/chains/chain-asset-hub-westend/src/lib.rs index 9de1c8809894..9d245e08f7cc 100644 --- a/bridges/chains/chain-asset-hub-westend/src/lib.rs +++ b/bridges/chains/chain-asset-hub-westend/src/lib.rs @@ -18,10 +18,13 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + use codec::{Decode, Encode}; use scale_info::TypeInfo; pub use bp_xcm_bridge_hub_router::XcmBridgeHubRouterCall; +use xcm::latest::prelude::*; /// `AssetHubWestend` Runtime `Call` enum. /// @@ -44,5 +47,27 @@ frame_support::parameter_types! { pub const XcmBridgeHubRouterTransactCallMaxWeight: frame_support::weights::Weight = frame_support::weights::Weight::from_parts(200_000_000, 6144); } +/// Builds an (un)congestion XCM program with the `report_bridge_status` call for +/// `ToRococoXcmRouter`. +pub fn build_congestion_message( + bridge_id: sp_core::H256, + is_congested: bool, +) -> alloc::vec::Vec> { + alloc::vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { + origin_kind: OriginKind::Xcm, + fallback_max_weight: Some(XcmBridgeHubRouterTransactCallMaxWeight::get()), + call: Call::ToRococoXcmRouter(XcmBridgeHubRouterCall::report_bridge_status { + bridge_id, + is_congested, + }) + .encode() + .into(), + }, + ExpectTransactStatus(MaybeErrorCode::Success), + ] +} + /// Identifier of AssetHubWestend in the Westend relay chain. 
pub const ASSET_HUB_WESTEND_PARACHAIN_ID: u32 = 1000; diff --git a/bridges/chains/chain-polkadot-bulletin/src/lib.rs b/bridges/chains/chain-polkadot-bulletin/src/lib.rs index c5c18beb2cad..070bc7b0ba3d 100644 --- a/bridges/chains/chain-polkadot-bulletin/src/lib.rs +++ b/bridges/chains/chain-polkadot-bulletin/src/lib.rs @@ -225,4 +225,4 @@ impl ChainWithMessages for PolkadotBulletin { } decl_bridge_finality_runtime_apis!(polkadot_bulletin, grandpa); -decl_bridge_messages_runtime_apis!(polkadot_bulletin, bp_messages::HashedLaneId); +decl_bridge_messages_runtime_apis!(polkadot_bulletin, bp_messages::LegacyLaneId); diff --git a/bridges/modules/relayers/src/extension/mod.rs b/bridges/modules/relayers/src/extension/mod.rs index 34d280d26d6e..d562ed9bcd0e 100644 --- a/bridges/modules/relayers/src/extension/mod.rs +++ b/bridges/modules/relayers/src/extension/mod.rs @@ -129,7 +129,7 @@ pub struct BridgeRelayersTransactionExtension( impl BridgeRelayersTransactionExtension where Self: 'static + Send + Sync, - R: RelayersConfig + R: RelayersConfig + BridgeMessagesConfig + TransactionPaymentConfig, C: ExtensionConfig, @@ -250,7 +250,7 @@ where // let's also replace the weight of slashing relayer with the weight of rewarding relayer if call_info.is_receive_messages_proof_call() { post_info_weight = post_info_weight.saturating_sub( - ::WeightInfo::extra_weight_of_successful_receive_messages_proof_call(), + >::WeightInfo::extra_weight_of_successful_receive_messages_proof_call(), ); } @@ -278,7 +278,7 @@ impl TransactionExtension for BridgeRelayersTransactionExtension where Self: 'static + Send + Sync, - R: RelayersConfig + R: RelayersConfig + BridgeMessagesConfig + TransactionPaymentConfig, C: ExtensionConfig, @@ -326,7 +326,9 @@ where }; // we only boost priority if relayer has staked required balance - if !RelayersPallet::::is_registration_active(&data.relayer) { + if !RelayersPallet::::is_registration_active( + &data.relayer, + ) { return Ok((Default::default(), Some(data), origin)) } @@ -382,7 +384,11 @@ where match call_result { RelayerAccountAction::None => (), RelayerAccountAction::Reward(relayer, reward_account, reward) => { - RelayersPallet::::register_relayer_reward(reward_account, &relayer, reward); + RelayersPallet::::register_relayer_reward( + reward_account, + &relayer, + reward, + ); log::trace!( target: LOG_TARGET, @@ -394,7 +400,7 @@ where ); }, RelayerAccountAction::Slash(relayer, slash_account) => - RelayersPallet::::slash_and_deregister( + RelayersPallet::::slash_and_deregister( &relayer, ExplicitOrAccountParams::Params(slash_account), ), diff --git a/bridges/modules/relayers/src/lib.rs b/bridges/modules/relayers/src/lib.rs index f06c2e16ac24..d1c71b6d3051 100644 --- a/bridges/modules/relayers/src/lib.rs +++ b/bridges/modules/relayers/src/lib.rs @@ -22,8 +22,9 @@ use bp_relayers::{ ExplicitOrAccountParams, PaymentProcedure, Registration, RelayerRewardsKeyProvider, - RewardsAccountParams, StakeAndSlash, + StakeAndSlash, }; +pub use bp_relayers::{RewardsAccountOwner, RewardsAccountParams}; use bp_runtime::StorageDoubleMapKeyProvider; use frame_support::fail; use sp_arithmetic::traits::{AtLeast32BitUnsigned, Zero}; @@ -31,7 +32,7 @@ use sp_runtime::{traits::CheckedSub, Saturating}; use sp_std::marker::PhantomData; pub use pallet::*; -pub use payment_adapter::DeliveryConfirmationPaymentsAdapter; +pub use payment_adapter::{DeliveryConfirmationPaymentsAdapter, PayRewardFromAccount}; pub use stake_adapter::StakeAndSlashNamed; pub use weights::WeightInfo; pub use weights_ext::WeightInfoExt; 
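The relayers hunks below thread an explicit relayers-pallet instance (`RI`) through `DeliveryConfirmationPaymentsAdapter`, so one runtime can host several `pallet-bridge-relayers` instances. A minimal sketch of the resulting wiring, assuming the `TestRuntime`, the default `()` instances and the `ConstU64<100_000>` reward used by the mocks in this diff (parameter order follows the adapter's new `PhantomData<(T, MI, RI, DeliveryReward)>` tuple):

```rust
// Sketch only: messages pallet config wiring with the added relayers instance.
type DeliveryConfirmationPayments = pallet_bridge_relayers::DeliveryConfirmationPaymentsAdapter<
    TestRuntime,       // T: runtime implementing both pallets' `Config`
    (),                // MI: messages pallet instance
    (),                // RI: relayers pallet instance (the new parameter)
    ConstU64<100_000>, // DeliveryReward: reward per delivered message
>;
```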
diff --git a/bridges/modules/relayers/src/mock.rs b/bridges/modules/relayers/src/mock.rs index d186e968e648..7dc213249379 100644 --- a/bridges/modules/relayers/src/mock.rs +++ b/bridges/modules/relayers/src/mock.rs @@ -171,14 +171,14 @@ pub type TestStakeAndSlash = pallet_bridge_relayers::StakeAndSlashNamed< frame_support::construct_runtime! { pub enum TestRuntime { - System: frame_system::{Pallet, Call, Config<T>, Storage, Event<T>}, + System: frame_system, Utility: pallet_utility, - Balances: pallet_balances::{Pallet, Call, Storage, Config<T>, Event<T>}, - TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event<T>}, - BridgeRelayers: pallet_bridge_relayers::{Pallet, Call, Storage, Event<T>}, - BridgeGrandpa: pallet_bridge_grandpa::{Pallet, Call, Storage, Event<T>}, - BridgeParachains: pallet_bridge_parachains::{Pallet, Call, Storage, Event<T>}, - BridgeMessages: pallet_bridge_messages::{Pallet, Call, Storage, Event<T>, Config<T>}, + Balances: pallet_balances, + TransactionPayment: pallet_transaction_payment, + BridgeRelayers: pallet_bridge_relayers, + BridgeGrandpa: pallet_bridge_grandpa, + BridgeParachains: pallet_bridge_parachains, + BridgeMessages: pallet_bridge_messages, } } @@ -267,6 +267,7 @@ impl pallet_bridge_messages::Config for TestRuntime { type DeliveryConfirmationPayments = pallet_bridge_relayers::DeliveryConfirmationPaymentsAdapter< TestRuntime, (), + (), ConstU64<100_000>, >; type OnMessagesDelivered = ();
diff --git a/bridges/modules/relayers/src/payment_adapter.rs b/bridges/modules/relayers/src/payment_adapter.rs index 5383cba5ecbd..5af0d8f9dfbf 100644 --- a/bridges/modules/relayers/src/payment_adapter.rs +++ b/bridges/modules/relayers/src/payment_adapter.rs @@ -22,6 +22,7 @@ use bp_messages::{ source_chain::{DeliveryConfirmationPayments, RelayersRewards}, MessageNonce, }; +pub use bp_relayers::PayRewardFromAccount; use bp_relayers::{RewardsAccountOwner, RewardsAccountParams}; use bp_runtime::Chain; use frame_support::{sp_runtime::SaturatedConversion, traits::Get}; @@ -31,15 +32,16 @@ use sp_std::{collections::vec_deque::VecDeque, marker::PhantomData, ops::RangeIn /// Adapter that allows relayers pallet to be used as a delivery+dispatch payment mechanism /// for the messages pallet. -pub struct DeliveryConfirmationPaymentsAdapter<T, MI, DeliveryReward>( - PhantomData<(T, MI, DeliveryReward)>, +pub struct DeliveryConfirmationPaymentsAdapter<T, MI, RI, DeliveryReward>( + PhantomData<(T, MI, RI, DeliveryReward)>, ); -impl<T, MI, DeliveryReward> DeliveryConfirmationPayments<T::AccountId, pallet_bridge_messages::LaneIdOf<T, MI>> - for DeliveryConfirmationPaymentsAdapter<T, MI, DeliveryReward> +impl<T, MI, RI, DeliveryReward> DeliveryConfirmationPayments<T::AccountId, pallet_bridge_messages::LaneIdOf<T, MI>> + for DeliveryConfirmationPaymentsAdapter<T, MI, RI, DeliveryReward> where - T: Config + pallet_bridge_messages::Config<MI, LaneId = <T as Config>::LaneId>, + T: Config<RI> + pallet_bridge_messages::Config<MI, LaneId = <T as Config<RI>>::LaneId>, MI: 'static, + RI: 'static, DeliveryReward: Get<T::Reward>, { type Error = &'static str; @@ -54,7 +56,7 @@ where bp_messages::calc_relayers_rewards::<T::AccountId>(messages_relayers, received_range); let rewarded_relayers = relayers_rewards.len(); - register_relayers_rewards::<T>( + register_relayers_rewards::<T, RI>( confirmation_relayer, relayers_rewards, RewardsAccountParams::new( @@ -70,7 +72,7 @@ where } // Update rewards to given relayers, optionally rewarding confirmation relayer.
-fn register_relayers_rewards<T: Config>( +fn register_relayers_rewards<T: Config<I>, I: 'static>( confirmation_relayer: &T::AccountId, relayers_rewards: RelayersRewards<T::AccountId, T::Reward>, lane_id: RewardsAccountParams<T::LaneId>, @@ -84,7 +86,7 @@ fn register_relayers_rewards<T: Config>( let relayer_reward = T::Reward::saturated_from(messages).saturating_mul(delivery_fee); if relayer != *confirmation_relayer { - Pallet::<T>::register_relayer_reward(lane_id, &relayer, relayer_reward); + Pallet::<T, I>::register_relayer_reward(lane_id, &relayer, relayer_reward); } else { confirmation_relayer_reward = confirmation_relayer_reward.saturating_add(relayer_reward); @@ -92,7 +94,7 @@ fn register_relayers_rewards<T: Config>( } // finally - pay reward to confirmation relayer - Pallet::<T>::register_relayer_reward( + Pallet::<T, I>::register_relayer_reward( lane_id, confirmation_relayer, confirmation_relayer_reward, @@ -115,7 +117,7 @@ mod tests { #[test] fn confirmation_relayer_is_rewarded_if_it_has_also_delivered_messages() { run_test(|| { - register_relayers_rewards::<TestRuntime>( + register_relayers_rewards::<TestRuntime, ()>( &RELAYER_2, relayers_rewards(), test_reward_account_param(), @@ -136,7 +138,7 @@ mod tests { #[test] fn confirmation_relayer_is_not_rewarded_if_it_has_not_delivered_any_messages() { run_test(|| { - register_relayers_rewards::<TestRuntime>( + register_relayers_rewards::<TestRuntime, ()>( &RELAYER_3, relayers_rewards(), test_reward_account_param(),
diff --git a/bridges/modules/xcm-bridge-hub-router/Cargo.toml b/bridges/modules/xcm-bridge-hub-router/Cargo.toml index 55824f6a7fe7..b0286938f36d 100644 --- a/bridges/modules/xcm-bridge-hub-router/Cargo.toml +++ b/bridges/modules/xcm-bridge-hub-router/Cargo.toml @@ -56,6 +56,7 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime",
diff --git a/bridges/modules/xcm-bridge-hub-router/src/benchmarking.rs b/bridges/modules/xcm-bridge-hub-router/src/benchmarking.rs index 3c4a10f82e7d..ff06a1e3c8c5 100644 --- a/bridges/modules/xcm-bridge-hub-router/src/benchmarking.rs +++ b/bridges/modules/xcm-bridge-hub-router/src/benchmarking.rs @@ -18,9 +18,9 @@ #![cfg(feature = "runtime-benchmarks")] -use crate::{DeliveryFeeFactor, MINIMAL_DELIVERY_FEE_FACTOR}; +use crate::{Bridge, BridgeState, Call, MINIMAL_DELIVERY_FEE_FACTOR}; use frame_benchmarking::{benchmarks_instance_pallet, BenchmarkError}; -use frame_support::traits::{Get, Hooks}; +use frame_support::traits::{EnsureOrigin, Get, Hooks, UnfilteredDispatchable}; use sp_runtime::traits::Zero; use xcm::prelude::*; @@ -45,16 +45,35 @@ pub trait Config<I: 'static>: crate::Config<I> { benchmarks_instance_pallet!
{ on_initialize_when_non_congested { - DeliveryFeeFactor::<T, I>::put(MINIMAL_DELIVERY_FEE_FACTOR + MINIMAL_DELIVERY_FEE_FACTOR); + Bridge::<T, I>::put(BridgeState { + is_congested: false, + delivery_fee_factor: MINIMAL_DELIVERY_FEE_FACTOR + MINIMAL_DELIVERY_FEE_FACTOR, + }); }: { crate::Pallet::<T, I>::on_initialize(Zero::zero()) } on_initialize_when_congested { - DeliveryFeeFactor::<T, I>::put(MINIMAL_DELIVERY_FEE_FACTOR + MINIMAL_DELIVERY_FEE_FACTOR); + Bridge::<T, I>::put(BridgeState { + is_congested: false, + delivery_fee_factor: MINIMAL_DELIVERY_FEE_FACTOR + MINIMAL_DELIVERY_FEE_FACTOR, + }); let _ = T::ensure_bridged_target_destination()?; T::make_congested(); }: { crate::Pallet::<T, I>::on_initialize(Zero::zero()) } + + report_bridge_status { + Bridge::<T, I>::put(BridgeState::default()); + + let origin: T::RuntimeOrigin = T::BridgeHubOrigin::try_successful_origin().expect("expected valid BridgeHubOrigin"); + let bridge_id = Default::default(); + let is_congested = true; + + let call = Call::<T, I>::report_bridge_status { bridge_id, is_congested }; + }: { call.dispatch_bypass_filter(origin)? } + verify { + assert!(Bridge::<T, I>::get().is_congested); + } }
diff --git a/bridges/modules/xcm-bridge-hub-router/src/lib.rs b/bridges/modules/xcm-bridge-hub-router/src/lib.rs index fe8f5a2efdfb..7361696faba7 100644 --- a/bridges/modules/xcm-bridge-hub-router/src/lib.rs +++ b/bridges/modules/xcm-bridge-hub-router/src/lib.rs @@ -30,9 +30,10 @@ #![cfg_attr(not(feature = "std"), no_std)] -pub use bp_xcm_bridge_hub_router::XcmChannelStatusProvider; +pub use bp_xcm_bridge_hub_router::{BridgeState, XcmChannelStatusProvider}; use codec::Encode; use frame_support::traits::Get; +use sp_core::H256; use sp_runtime::{FixedPointNumber, FixedU128, Saturating}; use sp_std::vec::Vec; use xcm::prelude::*; @@ -98,6 +99,8 @@ pub mod pallet { /// Checks the XCM version for the destination. type DestinationVersion: GetVersion; + /// Origin of the sibling bridge hub that is allowed to report bridge status. + type BridgeHubOrigin: EnsureOrigin<Self::RuntimeOrigin>; /// Actual message sender (`HRMP` or `DMP`) to the sibling bridge hub location. type ToBridgeHubSender: SendXcm; /// Local XCM channel manager. @@ -120,95 +123,112 @@ pub mod pallet { return T::WeightInfo::on_initialize_when_congested() } + // if bridge has reported congestion, we don't change anything + let mut bridge = Self::bridge(); + if bridge.is_congested { + return T::WeightInfo::on_initialize_when_congested() + } + // if we can't decrease the delivery fee factor anymore, we don't change anything - let mut delivery_fee_factor = Self::delivery_fee_factor(); - if delivery_fee_factor == MINIMAL_DELIVERY_FEE_FACTOR { + if bridge.delivery_fee_factor == MINIMAL_DELIVERY_FEE_FACTOR { return T::WeightInfo::on_initialize_when_congested() } - let previous_factor = delivery_fee_factor; - delivery_fee_factor = - MINIMAL_DELIVERY_FEE_FACTOR.max(delivery_fee_factor / EXPONENTIAL_FEE_BASE); + let previous_factor = bridge.delivery_fee_factor; + bridge.delivery_fee_factor = + MINIMAL_DELIVERY_FEE_FACTOR.max(bridge.delivery_fee_factor / EXPONENTIAL_FEE_BASE); + log::info!( target: LOG_TARGET, "Bridge channel is uncongested. Decreased fee factor from {} to {}", previous_factor, - delivery_fee_factor, + bridge.delivery_fee_factor, ); Self::deposit_event(Event::DeliveryFeeFactorDecreased { - new_value: delivery_fee_factor, + new_value: bridge.delivery_fee_factor, }); - DeliveryFeeFactor::<T, I>::put(delivery_fee_factor); + Bridge::<T, I>::put(bridge); T::WeightInfo::on_initialize_when_non_congested() } } - /// Initialization value for the delivery fee factor. - #[pallet::type_value] - pub fn InitialFactor() -> FixedU128 { - MINIMAL_DELIVERY_FEE_FACTOR + #[pallet::call] + impl<T: Config<I>, I: 'static> Pallet<T, I> { + /// Notification about congested bridge queue. + #[pallet::call_index(0)] + #[pallet::weight(T::WeightInfo::report_bridge_status())] + pub fn report_bridge_status( + origin: OriginFor<T>, + // this argument is not currently used, but to ease future migration, we'll keep it + // here + bridge_id: H256, + is_congested: bool, + ) -> DispatchResult { + let _ = T::BridgeHubOrigin::ensure_origin(origin)?; + + log::info!( + target: LOG_TARGET, + "Received bridge status from {:?}: congested = {}", + bridge_id, + is_congested, + ); + + Bridge::<T, I>::mutate(|bridge| { + bridge.is_congested = is_congested; + }); + Ok(()) + } } - /// The number to multiply the base delivery fee by. + /// Bridge that we are using. /// - /// This factor is shared by all bridges, served by this pallet. For example, if this - /// chain (`Config::UniversalLocation`) opens two bridges ( - /// `X2(GlobalConsensus(Config::BridgedNetworkId::get()), Parachain(1000))` and - /// `X2(GlobalConsensus(Config::BridgedNetworkId::get()), Parachain(2000))`), then they - /// both will be sharing the same fee factor. This is because both bridges are sharing - /// the same local XCM channel with the child/sibling bridge hub, which we are using - /// to detect congestion: - /// - /// ```nocompile - /// ThisChain --- Local XCM channel --> Sibling Bridge Hub ------ - /// | | - /// | | - /// | | - /// Lane1 Lane2 - /// | | - /// | | - /// | | - /// \ / | - /// Parachain1 <-- Local XCM channel --- Remote Bridge Hub <------ - /// | - /// | - /// Parachain1 <-- Local XCM channel --------- - /// ``` - /// - /// If at least one of other channels is congested, the local XCM channel with sibling - /// bridge hub eventually becomes congested too. And we have no means to detect - which - /// bridge exactly causes the congestion. So the best solution here is not to make - /// any differences between all bridges, started by this chain. + /// **bridges-v1** assumptions: all outbound messages through this router use a single lane + /// and go to a single remote consensus. If some other remote consensus uses the same + /// bridge hub, a separate pallet instance shall be used. In `v2` we'll have all required + /// primitives (lane-id aka bridge-id, derived from XCM locations) to support multiple bridges + /// with the same pallet instance. #[pallet::storage] - #[pallet::getter(fn delivery_fee_factor)] - pub type DeliveryFeeFactor<T: Config<I>, I: 'static = ()> = - StorageValue<_, FixedU128, ValueQuery, InitialFactor>; + #[pallet::getter(fn bridge)] + pub type Bridge<T: Config<I>, I: 'static = ()> = StorageValue<_, BridgeState, ValueQuery>; impl<T: Config<I>, I: 'static> Pallet<T, I> { /// Called when new message is sent (queued to local outbound XCM queue) over the bridge.
pub(crate) fn on_message_sent_to_bridge(message_size: u32) { - // if outbound channel is not congested, do nothing - if !T::LocalXcmChannelManager::is_congested(&T::SiblingBridgeHubLocation::get()) { - return - } + log::trace!( + target: LOG_TARGET, + "on_message_sent_to_bridge - message_size: {message_size:?}", + ); + let _ = Bridge::::try_mutate(|bridge| { + let is_channel_with_bridge_hub_congested = + T::LocalXcmChannelManager::is_congested(&T::SiblingBridgeHubLocation::get()); + let is_bridge_congested = bridge.is_congested; + + // if outbound queue is not congested AND bridge has not reported congestion, do + // nothing + if !is_channel_with_bridge_hub_congested && !is_bridge_congested { + return Err(()) + } + + // ok - we need to increase the fee factor, let's do that + let message_size_factor = FixedU128::from_u32(message_size.saturating_div(1024)) + .saturating_mul(MESSAGE_SIZE_FEE_BASE); + let total_factor = EXPONENTIAL_FEE_BASE.saturating_add(message_size_factor); + let previous_factor = bridge.delivery_fee_factor; + bridge.delivery_fee_factor = + bridge.delivery_fee_factor.saturating_mul(total_factor); - // ok - we need to increase the fee factor, let's do that - let message_size_factor = FixedU128::from_u32(message_size.saturating_div(1024)) - .saturating_mul(MESSAGE_SIZE_FEE_BASE); - let total_factor = EXPONENTIAL_FEE_BASE.saturating_add(message_size_factor); - DeliveryFeeFactor::::mutate(|f| { - let previous_factor = *f; - *f = f.saturating_mul(total_factor); log::info!( target: LOG_TARGET, "Bridge channel is congested. Increased fee factor from {} to {}", previous_factor, - f, + bridge.delivery_fee_factor, ); - Self::deposit_event(Event::DeliveryFeeFactorIncreased { new_value: *f }); - *f + Self::deposit_event(Event::DeliveryFeeFactorIncreased { + new_value: bridge.delivery_fee_factor, + }); + Ok(()) }); } } @@ -310,9 +330,9 @@ impl, I: 'static> ExporterFor for Pallet { let message_size = message.encoded_size(); let message_fee = (message_size as u128).saturating_mul(T::ByteFee::get()); let fee_sum = base_fee.saturating_add(message_fee); - - let fee_factor = Self::delivery_fee_factor(); + let fee_factor = Self::bridge().delivery_fee_factor; let fee = fee_factor.saturating_mul_int(fee_sum); + let fee = if fee > 0 { Some((T::FeeAsset::get(), fee).into()) } else { None }; log::info!( @@ -427,24 +447,47 @@ mod tests { use frame_system::{EventRecord, Phase}; use sp_runtime::traits::One; + fn congested_bridge(delivery_fee_factor: FixedU128) -> BridgeState { + BridgeState { is_congested: true, delivery_fee_factor } + } + + fn uncongested_bridge(delivery_fee_factor: FixedU128) -> BridgeState { + BridgeState { is_congested: false, delivery_fee_factor } + } + #[test] fn initial_fee_factor_is_one() { run_test(|| { - assert_eq!(DeliveryFeeFactor::::get(), MINIMAL_DELIVERY_FEE_FACTOR); + assert_eq!( + Bridge::::get(), + uncongested_bridge(MINIMAL_DELIVERY_FEE_FACTOR), + ); }) } #[test] fn fee_factor_is_not_decreased_from_on_initialize_when_xcm_channel_is_congested() { run_test(|| { - DeliveryFeeFactor::::put(FixedU128::from_rational(125, 100)); + Bridge::::put(uncongested_bridge(FixedU128::from_rational(125, 100))); TestLocalXcmChannelManager::make_congested(&SiblingBridgeHubLocation::get()); // it should not decrease, because queue is congested - let old_delivery_fee_factor = XcmBridgeHubRouter::delivery_fee_factor(); + let old_delivery = XcmBridgeHubRouter::bridge(); XcmBridgeHubRouter::on_initialize(One::one()); - assert_eq!(XcmBridgeHubRouter::delivery_fee_factor(), 
old_delivery_fee_factor); + assert_eq!(XcmBridgeHubRouter::bridge(), old_delivery); + assert_eq!(System::events(), vec![]); + }) + } + + #[test] + fn fee_factor_is_not_decreased_from_on_initialize_when_bridge_has_reported_congestion() { + run_test(|| { + Bridge::::put(congested_bridge(FixedU128::from_rational(125, 100))); + // it should not decrease, because bridge congested + let old_bridge = XcmBridgeHubRouter::bridge(); + XcmBridgeHubRouter::on_initialize(One::one()); + assert_eq!(XcmBridgeHubRouter::bridge(), old_bridge); assert_eq!(System::events(), vec![]); }) } @@ -453,16 +496,19 @@ mod tests { fn fee_factor_is_decreased_from_on_initialize_when_xcm_channel_is_uncongested() { run_test(|| { let initial_fee_factor = FixedU128::from_rational(125, 100); - DeliveryFeeFactor::::put(initial_fee_factor); + Bridge::::put(uncongested_bridge(initial_fee_factor)); - // it shold eventually decreased to one - while XcmBridgeHubRouter::delivery_fee_factor() > MINIMAL_DELIVERY_FEE_FACTOR { + // it should eventually decrease to one + while XcmBridgeHubRouter::bridge().delivery_fee_factor > MINIMAL_DELIVERY_FEE_FACTOR { XcmBridgeHubRouter::on_initialize(One::one()); } - // verify that it doesn't decreases anymore + // verify that it doesn't decrease anymore XcmBridgeHubRouter::on_initialize(One::one()); - assert_eq!(XcmBridgeHubRouter::delivery_fee_factor(), MINIMAL_DELIVERY_FEE_FACTOR); + assert_eq!( + XcmBridgeHubRouter::bridge(), + uncongested_bridge(MINIMAL_DELIVERY_FEE_FACTOR) + ); // check emitted event let first_system_event = System::events().first().cloned(); @@ -582,7 +628,7 @@ mod tests { // but when factor is larger than one, it increases the fee, so it becomes: // `(BASE_FEE + BYTE_FEE * msg_size) * F + HRMP_FEE` let factor = FixedU128::from_rational(125, 100); - DeliveryFeeFactor::::put(factor); + Bridge::::put(uncongested_bridge(factor)); let expected_fee = (FixedU128::saturating_from_integer(BASE_FEE + BYTE_FEE * (msg_size as u128)) * factor) @@ -598,7 +644,7 @@ mod tests { #[test] fn sent_message_doesnt_increase_factor_if_queue_is_uncongested() { run_test(|| { - let old_delivery_fee_factor = XcmBridgeHubRouter::delivery_fee_factor(); + let old_bridge = XcmBridgeHubRouter::bridge(); assert_eq!( send_xcm::( Location::new(2, [GlobalConsensus(BridgedNetworkId::get()), Parachain(1000)]), @@ -609,7 +655,7 @@ mod tests { ); assert!(TestToBridgeHubSender::is_message_sent()); - assert_eq!(old_delivery_fee_factor, XcmBridgeHubRouter::delivery_fee_factor()); + assert_eq!(old_bridge, XcmBridgeHubRouter::bridge()); assert_eq!(System::events(), vec![]); }); @@ -620,7 +666,39 @@ mod tests { run_test(|| { TestLocalXcmChannelManager::make_congested(&SiblingBridgeHubLocation::get()); - let old_delivery_fee_factor = XcmBridgeHubRouter::delivery_fee_factor(); + let old_bridge = XcmBridgeHubRouter::bridge(); + assert_ok!(send_xcm::( + Location::new(2, [GlobalConsensus(BridgedNetworkId::get()), Parachain(1000)]), + vec![ClearOrigin].into(), + ) + .map(drop)); + + assert!(TestToBridgeHubSender::is_message_sent()); + assert!( + old_bridge.delivery_fee_factor < XcmBridgeHubRouter::bridge().delivery_fee_factor + ); + + // check emitted event + let first_system_event = System::events().first().cloned(); + assert!(matches!( + first_system_event, + Some(EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::XcmBridgeHubRouter( + Event::DeliveryFeeFactorIncreased { .. } + ), + .. 
+ }) + )); + }); + } + + #[test] + fn sent_message_increases_factor_if_bridge_has_reported_congestion() { + run_test(|| { + Bridge::::put(congested_bridge(MINIMAL_DELIVERY_FEE_FACTOR)); + + let old_bridge = XcmBridgeHubRouter::bridge(); assert_ok!(send_xcm::( Location::new(2, [GlobalConsensus(BridgedNetworkId::get()), Parachain(1000)]), vec![ClearOrigin].into(), @@ -628,7 +706,9 @@ mod tests { .map(drop)); assert!(TestToBridgeHubSender::is_message_sent()); - assert!(old_delivery_fee_factor < XcmBridgeHubRouter::delivery_fee_factor()); + assert!( + old_bridge.delivery_fee_factor < XcmBridgeHubRouter::bridge().delivery_fee_factor + ); // check emitted event let first_system_event = System::events().first().cloned(); diff --git a/bridges/modules/xcm-bridge-hub-router/src/mock.rs b/bridges/modules/xcm-bridge-hub-router/src/mock.rs index 095572883920..ac642e108c2a 100644 --- a/bridges/modules/xcm-bridge-hub-router/src/mock.rs +++ b/bridges/modules/xcm-bridge-hub-router/src/mock.rs @@ -80,6 +80,7 @@ impl pallet_xcm_bridge_hub_router::Config<()> for TestRuntime { type DestinationVersion = LatestOrNoneForLocationVersionChecker>; + type BridgeHubOrigin = frame_system::EnsureRoot; type ToBridgeHubSender = TestToBridgeHubSender; type LocalXcmChannelManager = TestLocalXcmChannelManager; diff --git a/bridges/modules/xcm-bridge-hub-router/src/weights.rs b/bridges/modules/xcm-bridge-hub-router/src/weights.rs index d9a0426fecaf..8f5012c9de26 100644 --- a/bridges/modules/xcm-bridge-hub-router/src/weights.rs +++ b/bridges/modules/xcm-bridge-hub-router/src/weights.rs @@ -52,6 +52,7 @@ use sp_std::marker::PhantomData; pub trait WeightInfo { fn on_initialize_when_non_congested() -> Weight; fn on_initialize_when_congested() -> Weight; + fn report_bridge_status() -> Weight; } /// Weights for `pallet_xcm_bridge_hub_router` that are generated using one of the Bridge testnets. @@ -85,6 +86,19 @@ impl WeightInfo for BridgeWeight { // Minimum execution time: 4_239 nanoseconds. Weight::from_parts(4_383_000, 3547).saturating_add(T::DbWeight::get().reads(1_u64)) } + /// Storage: `XcmBridgeHubRouter::Bridge` (r:1 w:1) + /// + /// Proof: `XcmBridgeHubRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: + /// 512, mode: `MaxEncodedLen`) + fn report_bridge_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `53` + // Estimated: `1502` + // Minimum execution time: 10_427 nanoseconds. + Weight::from_parts(10_682_000, 1502) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } } // For backwards compatibility and tests @@ -120,4 +134,17 @@ impl WeightInfo for () { // Minimum execution time: 4_239 nanoseconds. Weight::from_parts(4_383_000, 3547).saturating_add(RocksDbWeight::get().reads(1_u64)) } + /// Storage: `XcmBridgeHubRouter::Bridge` (r:1 w:1) + /// + /// Proof: `XcmBridgeHubRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: + /// 512, mode: `MaxEncodedLen`) + fn report_bridge_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `53` + // Estimated: `1502` + // Minimum execution time: 10_427 nanoseconds. 
+ Weight::from_parts(10_682_000, 1502) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } } diff --git a/bridges/modules/xcm-bridge-hub/Cargo.toml b/bridges/modules/xcm-bridge-hub/Cargo.toml index fe58b910a94e..ef49b3396b5c 100644 --- a/bridges/modules/xcm-bridge-hub/Cargo.toml +++ b/bridges/modules/xcm-bridge-hub/Cargo.toml @@ -39,6 +39,7 @@ sp-io = { workspace = true } bp-runtime = { workspace = true } bp-header-chain = { workspace = true } pallet-xcm-bridge-hub-router = { workspace = true } +bp-xcm-bridge-hub-router = { workspace = true } polkadot-parachain-primitives = { workspace = true } [features] @@ -47,6 +48,7 @@ std = [ "bp-header-chain/std", "bp-messages/std", "bp-runtime/std", + "bp-xcm-bridge-hub-router/std", "bp-xcm-bridge-hub/std", "codec/std", "frame-support/std", @@ -75,6 +77,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", diff --git a/bridges/modules/xcm-bridge-hub/src/exporter.rs b/bridges/modules/xcm-bridge-hub/src/exporter.rs index 5afb9f36bc94..93b6093b42af 100644 --- a/bridges/modules/xcm-bridge-hub/src/exporter.rs +++ b/bridges/modules/xcm-bridge-hub/src/exporter.rs @@ -364,7 +364,7 @@ mod tests { use bp_runtime::RangeInclusiveExt; use bp_xcm_bridge_hub::{Bridge, BridgeLocations, BridgeState}; - use frame_support::assert_ok; + use frame_support::{assert_ok, traits::EnsureOrigin}; use pallet_bridge_messages::InboundLaneStorage; use xcm_builder::{NetworkExportTable, UnpaidRemoteExporter}; use xcm_executor::traits::{export_xcm, ConvertLocation}; @@ -381,9 +381,8 @@ mod tests { BridgedUniversalDestination::get() } - fn open_lane() -> (BridgeLocations, TestLaneIdType) { + fn open_lane(origin: RuntimeOrigin) -> (BridgeLocations, TestLaneIdType) { // open expected outbound lane - let origin = OpenBridgeOrigin::sibling_parachain_origin(); let with = bridged_asset_hub_universal_location(); let locations = XcmOverBridge::bridge_locations_from_origin(origin, Box::new(with.into())).unwrap(); @@ -439,7 +438,7 @@ mod tests { } fn open_lane_and_send_regular_message() -> (BridgeId, TestLaneIdType) { - let (locations, lane_id) = open_lane(); + let (locations, lane_id) = open_lane(OpenBridgeOrigin::sibling_parachain_origin()); // now let's try to enqueue message using our `ExportXcm` implementation export_xcm::( @@ -473,7 +472,7 @@ mod tests { fn exporter_does_not_suspend_the_bridge_if_outbound_bridge_queue_is_not_congested() { run_test(|| { let (bridge_id, _) = open_lane_and_send_regular_message(); - assert!(!TestLocalXcmChannelManager::is_bridge_suspened()); + assert!(!TestLocalXcmChannelManager::is_bridge_suspended(&bridge_id)); assert_eq!(XcmOverBridge::bridge(&bridge_id).unwrap().state, BridgeState::Opened); }); } @@ -490,7 +489,7 @@ mod tests { } open_lane_and_send_regular_message(); - assert!(!TestLocalXcmChannelManager::is_bridge_suspened()); + assert!(!TestLocalXcmChannelManager::is_bridge_suspended(&bridge_id)); }); } @@ -502,11 +501,11 @@ mod tests { open_lane_and_send_regular_message(); } - assert!(!TestLocalXcmChannelManager::is_bridge_suspened()); + assert!(!TestLocalXcmChannelManager::is_bridge_suspended(&bridge_id)); assert_eq!(XcmOverBridge::bridge(&bridge_id).unwrap().state, BridgeState::Opened); open_lane_and_send_regular_message(); - assert!(TestLocalXcmChannelManager::is_bridge_suspened()); + 
assert!(TestLocalXcmChannelManager::is_bridge_suspended(&bridge_id)); assert_eq!(XcmOverBridge::bridge(&bridge_id).unwrap().state, BridgeState::Suspended); }); } @@ -523,7 +522,7 @@ mod tests { OUTBOUND_LANE_UNCONGESTED_THRESHOLD + 1, ); - assert!(!TestLocalXcmChannelManager::is_bridge_resumed()); + assert!(!TestLocalXcmChannelManager::is_bridge_resumed(&bridge_id)); assert_eq!(XcmOverBridge::bridge(&bridge_id).unwrap().state, BridgeState::Suspended); }); } @@ -537,7 +536,7 @@ mod tests { OUTBOUND_LANE_UNCONGESTED_THRESHOLD, ); - assert!(!TestLocalXcmChannelManager::is_bridge_resumed()); + assert!(!TestLocalXcmChannelManager::is_bridge_resumed(&bridge_id)); assert_eq!(XcmOverBridge::bridge(&bridge_id).unwrap().state, BridgeState::Opened); }); } @@ -554,7 +553,7 @@ mod tests { OUTBOUND_LANE_UNCONGESTED_THRESHOLD, ); - assert!(TestLocalXcmChannelManager::is_bridge_resumed()); + assert!(TestLocalXcmChannelManager::is_bridge_resumed(&bridge_id)); assert_eq!(XcmOverBridge::bridge(&bridge_id).unwrap().state, BridgeState::Opened); }); } @@ -648,7 +647,10 @@ mod tests { let dest = Location::new(2, BridgedUniversalDestination::get()); // open bridge - let (_, expected_lane_id) = open_lane(); + let origin = OpenBridgeOrigin::sibling_parachain_origin(); + let origin_as_location = + OpenBridgeOriginOf::::try_origin(origin.clone()).unwrap(); + let (_, expected_lane_id) = open_lane(origin); // check before - no messages assert_eq!( @@ -662,18 +664,24 @@ mod tests { ); // send `ExportMessage(message)` by `UnpaidRemoteExporter`. - TestExportXcmWithXcmOverBridge::set_origin_for_execute(SiblingLocation::get()); + ExecuteXcmOverSendXcm::set_origin_for_execute(origin_as_location); assert_ok!(send_xcm::< UnpaidRemoteExporter< NetworkExportTable, - TestExportXcmWithXcmOverBridge, + ExecuteXcmOverSendXcm, UniversalLocation, >, >(dest.clone(), Xcm::<()>::default())); + // we need to set `UniversalLocation` for `sibling_parachain_origin` for + // `XcmOverBridgeWrappedWithExportMessageRouterInstance`. + ExportMessageOriginUniversalLocation::set(Some(SiblingUniversalLocation::get())); // send `ExportMessage(message)` by `pallet_xcm_bridge_hub_router`. 
- TestExportXcmWithXcmOverBridge::set_origin_for_execute(SiblingLocation::get()); - assert_ok!(send_xcm::(dest.clone(), Xcm::<()>::default())); + ExecuteXcmOverSendXcm::set_origin_for_execute(SiblingLocation::get()); + assert_ok!(send_xcm::( + dest.clone(), + Xcm::<()>::default() + )); // check after - a message ready to be relayed assert_eq!( @@ -765,7 +773,7 @@ mod tests { ); // ok - let _ = open_lane(); + let _ = open_lane(OpenBridgeOrigin::sibling_parachain_origin()); let mut dest_wrapper = Some(bridged_relative_destination()); assert_ok!(XcmOverBridge::validate( BridgedRelayNetwork::get(), @@ -780,4 +788,77 @@ mod tests { assert_eq!(None, dest_wrapper); }); } + + #[test] + fn congestion_with_pallet_xcm_bridge_hub_router_works() { + run_test(|| { + // valid routable destination + let dest = Location::new(2, BridgedUniversalDestination::get()); + + fn router_bridge_state() -> pallet_xcm_bridge_hub_router::BridgeState { + pallet_xcm_bridge_hub_router::Bridge::< + TestRuntime, + XcmOverBridgeWrappedWithExportMessageRouterInstance, + >::get() + } + + // open two bridges + let origin = OpenBridgeOrigin::sibling_parachain_origin(); + let origin_as_location = + OpenBridgeOriginOf::::try_origin(origin.clone()).unwrap(); + let (bridge_1, expected_lane_id_1) = open_lane(origin); + + // we need to set `UniversalLocation` for `sibling_parachain_origin` for + // `XcmOverBridgeWrappedWithExportMessageRouterInstance`. + ExportMessageOriginUniversalLocation::set(Some(SiblingUniversalLocation::get())); + + // check before + // bridges are opened + assert_eq!( + XcmOverBridge::bridge(bridge_1.bridge_id()).unwrap().state, + BridgeState::Opened + ); + + // the router is uncongested + assert!(!router_bridge_state().is_congested); + assert!(!TestLocalXcmChannelManager::is_bridge_suspended(bridge_1.bridge_id())); + assert!(!TestLocalXcmChannelManager::is_bridge_resumed(bridge_1.bridge_id())); + + // make bridges congested with sending too much messages + for _ in 1..(OUTBOUND_LANE_CONGESTED_THRESHOLD + 2) { + // send `ExportMessage(message)` by `pallet_xcm_bridge_hub_router`. 
+				ExecuteXcmOverSendXcm::set_origin_for_execute(origin_as_location.clone());
+				assert_ok!(send_xcm::<XcmOverBridgeWrappedWithExportMessageRouter>(
+					dest.clone(),
+					Xcm::<()>::default()
+				));
+			}
+
+			// check after
+			// the bridge is suspended
+			assert_eq!(
+				XcmOverBridge::bridge(bridge_1.bridge_id()).unwrap().state,
+				BridgeState::Suspended,
+			);
+			// the router is congested
+			assert!(router_bridge_state().is_congested);
+			assert!(TestLocalXcmChannelManager::is_bridge_suspended(bridge_1.bridge_id()));
+			assert!(!TestLocalXcmChannelManager::is_bridge_resumed(bridge_1.bridge_id()));
+
+			// make the bridge uncongested to trigger the resume signal
+			XcmOverBridge::on_bridge_messages_delivered(
+				expected_lane_id_1,
+				OUTBOUND_LANE_UNCONGESTED_THRESHOLD,
+			);
+
+			// the bridge is opened again
+			assert_eq!(
+				XcmOverBridge::bridge(bridge_1.bridge_id()).unwrap().state,
+				BridgeState::Opened
+			);
+			// the router is uncongested
+			assert!(!router_bridge_state().is_congested);
+			assert!(TestLocalXcmChannelManager::is_bridge_resumed(bridge_1.bridge_id()));
+		})
+	}
 }
diff --git a/bridges/modules/xcm-bridge-hub/src/lib.rs b/bridges/modules/xcm-bridge-hub/src/lib.rs
index 1b2536598a20..682db811efa7 100644
--- a/bridges/modules/xcm-bridge-hub/src/lib.rs
+++ b/bridges/modules/xcm-bridge-hub/src/lib.rs
@@ -145,8 +145,8 @@
 use bp_messages::{LaneState, MessageNonce};
 use bp_runtime::{AccountIdOf, BalanceOf, RangeInclusiveExt};
-pub use bp_xcm_bridge_hub::{Bridge, BridgeId, BridgeState};
-use bp_xcm_bridge_hub::{BridgeLocations, BridgeLocationsError, LocalXcmChannelManager};
+pub use bp_xcm_bridge_hub::{Bridge, BridgeId, BridgeState, LocalXcmChannelManager};
+use bp_xcm_bridge_hub::{BridgeLocations, BridgeLocationsError};
 use frame_support::{traits::fungible::MutateHold, DefaultNoBound};
 use frame_system::Config as SystemConfig;
 use pallet_bridge_messages::{Config as BridgeMessagesConfig, LanesManagerError};
diff --git a/bridges/modules/xcm-bridge-hub/src/mock.rs b/bridges/modules/xcm-bridge-hub/src/mock.rs
index 9f06b99ef6d5..d186507dab17 100644
--- a/bridges/modules/xcm-bridge-hub/src/mock.rs
+++ b/bridges/modules/xcm-bridge-hub/src/mock.rs
@@ -24,10 +24,10 @@ use bp_messages::{
 };
 use bp_runtime::{messages::MessageDispatchResult, Chain, ChainId, HashOf};
 use bp_xcm_bridge_hub::{BridgeId, LocalXcmChannelManager};
-use codec::Encode;
+use codec::{Decode, Encode};
 use frame_support::{
 	assert_ok, derive_impl, parameter_types,
-	traits::{EnsureOrigin, Equals, Everything, OriginTrait},
+	traits::{EnsureOrigin, Equals, Everything, Get, OriginTrait},
 	weights::RuntimeDbWeight,
 };
 use polkadot_parachain_primitives::primitives::Sibling;
@@ -44,7 +44,7 @@ use xcm_builder::{
 	InspectMessageQueues, NetworkExportTable, NetworkExportTableItem, ParentIsPreset,
 	SiblingParachainConvertsVia,
 };
-use xcm_executor::XcmExecutor;
+use xcm_executor::{traits::ConvertOrigin, XcmExecutor};
 
 pub type AccountId = AccountId32;
 pub type Balance = u64;
@@ -63,7 +63,7 @@ frame_support::construct_runtime!
 {
 		Balances: pallet_balances::{Pallet, Event<T>},
 		Messages: pallet_bridge_messages::{Pallet, Call, Event<T>},
 		XcmOverBridge: pallet_xcm_bridge_hub::{Pallet, Call, HoldReason, Event<T>},
-		XcmOverBridgeRouter: pallet_xcm_bridge_hub_router,
+		XcmOverBridgeWrappedWithExportMessageRouter: pallet_xcm_bridge_hub_router = 57,
 	}
 }
@@ -208,17 +208,27 @@ impl pallet_xcm_bridge_hub::Config for TestRuntime {
 	type BlobDispatcher = TestBlobDispatcher;
 }
-impl pallet_xcm_bridge_hub_router::Config<()> for TestRuntime {
+/// This router instance simulates a scenario where the router is deployed on a different chain
+/// than the `MessageExporter`. This means that the router sends an `ExportMessage`.
+pub type XcmOverBridgeWrappedWithExportMessageRouterInstance = ();
+impl pallet_xcm_bridge_hub_router::Config<XcmOverBridgeWrappedWithExportMessageRouterInstance>
+	for TestRuntime
+{
 	type RuntimeEvent = RuntimeEvent;
 	type WeightInfo = ();
-	type UniversalLocation = UniversalLocation;
+	type UniversalLocation = ExportMessageOriginUniversalLocation;
 	type SiblingBridgeHubLocation = BridgeHubLocation;
 	type BridgedNetworkId = BridgedRelayNetwork;
 	type Bridges = NetworkExportTable<BridgeTable>;
 	type DestinationVersion = AlwaysLatest;
-	type ToBridgeHubSender = TestExportXcmWithXcmOverBridge;
+	// We convert to root `here` location with `BridgeHubLocationXcmOriginAsRoot`
+	type BridgeHubOrigin = frame_system::EnsureRoot<AccountId>;
+	// **Note**: The crucial part is that `ExportMessage` is processed by `XcmExecutor`, which
+	// calls the `ExportXcm` implementation of `pallet_xcm_bridge_hub` as the
+	// `MessageExporter`.
+	type ToBridgeHubSender = ExecuteXcmOverSendXcm;
 	type LocalXcmChannelManager = TestLocalXcmChannelManager;
 	type ByteFee = ConstU128<0>;
@@ -230,7 +240,7 @@ impl xcm_executor::Config for XcmConfig {
 	type RuntimeCall = RuntimeCall;
 	type XcmSender = ();
 	type AssetTransactor = ();
-	type OriginConverter = ();
+	type OriginConverter = BridgeHubLocationXcmOriginAsRoot<RuntimeOrigin>;
 	type IsReserve = ();
 	type IsTeleporter = ();
 	type UniversalLocation = UniversalLocation;
@@ -270,8 +280,8 @@ thread_local! {
 ///
 /// Note: The crucial part is that `ExportMessage` is processed by `XcmExecutor`, which calls the
 /// `ExportXcm` implementation of `pallet_xcm_bridge_hub` as `MessageExporter`.
-pub struct TestExportXcmWithXcmOverBridge;
-impl SendXcm for TestExportXcmWithXcmOverBridge {
+pub struct ExecuteXcmOverSendXcm;
+impl SendXcm for ExecuteXcmOverSendXcm {
 	type Ticket = Xcm<()>;
 
 	fn validate(
@@ -298,7 +308,7 @@ impl SendXcm for TestExportXcmWithXcmOverBridge {
 		Ok(hash)
 	}
 }
-impl InspectMessageQueues for TestExportXcmWithXcmOverBridge {
+impl InspectMessageQueues for ExecuteXcmOverSendXcm {
 	fn clear_messages() {
 		todo!()
 	}
@@ -307,12 +317,51 @@ impl InspectMessageQueues for TestExportXcmWithXcmOverBridge {
 		todo!()
 	}
 }
-impl TestExportXcmWithXcmOverBridge {
+impl ExecuteXcmOverSendXcm {
 	pub fn set_origin_for_execute(origin: Location) {
 		EXECUTE_XCM_ORIGIN.with(|o| *o.borrow_mut() = Some(origin));
 	}
 }
+/// A dynamic way to set a different universal location for the origin which sends
+/// `ExportMessage`.
+pub struct ExportMessageOriginUniversalLocation;
+impl ExportMessageOriginUniversalLocation {
+	pub(crate) fn set(universal_location: Option<InteriorLocation>) {
+		EXPORT_MESSAGE_ORIGIN_UNIVERSAL_LOCATION.with(|o| *o.borrow_mut() = universal_location);
+	}
+}
+impl Get<InteriorLocation> for ExportMessageOriginUniversalLocation {
+	fn get() -> InteriorLocation {
+		EXPORT_MESSAGE_ORIGIN_UNIVERSAL_LOCATION.with(|o| {
+			o.borrow()
+				.clone()
+				.expect("`EXPORT_MESSAGE_ORIGIN_UNIVERSAL_LOCATION` is not set!")
+		})
+	}
+}
+thread_local! {
+	pub static EXPORT_MESSAGE_ORIGIN_UNIVERSAL_LOCATION: RefCell<Option<InteriorLocation>> =
+		RefCell::new(None);
+}
+
+pub struct BridgeHubLocationXcmOriginAsRoot<RuntimeOrigin>(
+	sp_std::marker::PhantomData<RuntimeOrigin>,
+);
+impl<RuntimeOrigin: OriginTrait> ConvertOrigin<RuntimeOrigin>
+	for BridgeHubLocationXcmOriginAsRoot<RuntimeOrigin>
+{
+	fn convert_origin(
+		origin: impl Into<Location>,
+		kind: OriginKind,
+	) -> Result<RuntimeOrigin, Location> {
+		let origin = origin.into();
+		if kind == OriginKind::Xcm && origin.eq(&BridgeHubLocation::get()) {
+			Ok(RuntimeOrigin::root())
+		} else {
+			Err(origin)
+		}
+	}
+}
+
 /// Type for specifying how a `Location` can be converted into an `AccountId`. This is used
 /// when determining ownership of accounts for asset transacting and when attempting to use XCM
 /// `Transact` in order to determine the dispatch Origin.
@@ -396,6 +445,9 @@ impl EnsureOrigin<RuntimeOrigin> for OpenBridgeOrigin {
 	}
 }
+pub(crate) type OpenBridgeOriginOf<T, I> =
+	<T as pallet_xcm_bridge_hub::Config<I>>::OpenBridgeOrigin;
+
 pub struct TestLocalXcmChannelManager;
 
 impl TestLocalXcmChannelManager {
@@ -403,30 +455,82 @@ impl TestLocalXcmChannelManager {
 		frame_support::storage::unhashed::put(b"TestLocalXcmChannelManager.Congested", &true);
 	}
-	pub fn is_bridge_suspened() -> bool {
-		frame_support::storage::unhashed::get_or_default(b"TestLocalXcmChannelManager.Suspended")
+	fn suspended_key(bridge: &BridgeId) -> Vec<u8> {
+		[b"TestLocalXcmChannelManager.Suspended", bridge.encode().as_slice()].concat()
+	}
+	fn resumed_key(bridge: &BridgeId) -> Vec<u8> {
+		[b"TestLocalXcmChannelManager.Resumed", bridge.encode().as_slice()].concat()
+	}
+
+	pub fn is_bridge_suspended(bridge: &BridgeId) -> bool {
+		frame_support::storage::unhashed::get_or_default(&Self::suspended_key(bridge))
 	}
 
-	pub fn is_bridge_resumed() -> bool {
-		frame_support::storage::unhashed::get_or_default(b"TestLocalXcmChannelManager.Resumed")
+	pub fn is_bridge_resumed(bridge: &BridgeId) -> bool {
+		frame_support::storage::unhashed::get_or_default(&Self::resumed_key(bridge))
+	}
+
+	fn build_congestion_message(bridge: &BridgeId, is_congested: bool) -> Vec<Instruction<()>> {
+		use bp_xcm_bridge_hub_router::XcmBridgeHubRouterCall;
+		#[allow(clippy::large_enum_variant)]
+		#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, scale_info::TypeInfo)]
+		enum Call {
+			#[codec(index = 57)]
+			XcmOverBridgeWrappedWithExportMessageRouter(XcmBridgeHubRouterCall),
+		}
+
+		sp_std::vec![
+			UnpaidExecution { weight_limit: Unlimited, check_origin: None },
+			Transact {
+				origin_kind: OriginKind::Xcm,
+				fallback_max_weight: None,
+				call: Call::XcmOverBridgeWrappedWithExportMessageRouter(
+					XcmBridgeHubRouterCall::report_bridge_status {
+						bridge_id: bridge.inner(),
+						is_congested,
+					}
+				)
+				.encode()
+				.into(),
+			},
+			ExpectTransactStatus(MaybeErrorCode::Success),
+		]
+	}
+
+	fn report_bridge_status(
+		local_origin: &Location,
+		bridge: &BridgeId,
+		is_congested: bool,
+		key: Vec<u8>,
+	) -> Result<(), SendError> {
+		// send as BridgeHub would send to sibling chain
+		ExecuteXcmOverSendXcm::set_origin_for_execute(BridgeHubLocation::get());
+		let result = send_xcm::<ExecuteXcmOverSendXcm>(
+			local_origin.clone(),
+			Self::build_congestion_message(&bridge, is_congested).into(),
+		);
+
+		if result.is_ok() {
+			frame_support::storage::unhashed::put(&key, &true);
+		}
+
+		result.map(|_| ())
 	}
 }
 
 impl LocalXcmChannelManager for TestLocalXcmChannelManager {
-	type Error = ();
+	type Error = SendError;
 
 	fn is_congested(_with: &Location) -> bool {
 		frame_support::storage::unhashed::get_or_default(b"TestLocalXcmChannelManager.Congested")
 	}
 
-	fn suspend_bridge(_local_origin: &Location, _bridge: BridgeId) -> Result<(), Self::Error> {
-		frame_support::storage::unhashed::put(b"TestLocalXcmChannelManager.Suspended", &true);
-		Ok(())
+	fn suspend_bridge(local_origin: &Location, bridge: BridgeId) -> Result<(), Self::Error> {
+		Self::report_bridge_status(local_origin, &bridge, true, Self::suspended_key(&bridge))
 	}
 
-	fn resume_bridge(_local_origin: &Location, _bridge: BridgeId) -> Result<(), Self::Error> {
-		frame_support::storage::unhashed::put(b"TestLocalXcmChannelManager.Resumed", &true);
-		Ok(())
+	fn resume_bridge(local_origin: &Location, bridge: BridgeId) -> Result<(), Self::Error> {
+		Self::report_bridge_status(local_origin, &bridge, false, Self::resumed_key(&bridge))
 	}
 }
diff --git a/bridges/primitives/xcm-bridge-hub/src/lib.rs b/bridges/primitives/xcm-bridge-hub/src/lib.rs
index 63beb1bc3041..471cf402c34f 100644
--- a/bridges/primitives/xcm-bridge-hub/src/lib.rs
+++ b/bridges/primitives/xcm-bridge-hub/src/lib.rs
@@ -87,6 +87,11 @@ impl BridgeId {
 			.into(),
 		)
 	}
+
+	/// Access the inner representation.
+	pub fn inner(&self) -> H256 {
+		self.0
+	}
 }
 
 impl core::fmt::Debug for BridgeId {
diff --git a/bridges/relays/utils/src/initialize.rs b/bridges/relays/utils/src/initialize.rs
index 564ed1f0e5cc..deb9b9d059d5 100644
--- a/bridges/relays/utils/src/initialize.rs
+++ b/bridges/relays/utils/src/initialize.rs
@@ -52,9 +52,10 @@ pub fn initialize_logger(with_timestamp: bool) {
 			format,
 		);
 
-	let env_filter = EnvFilter::from_default_env()
-		.add_directive(Level::WARN.into())
-		.add_directive("bridge=info".parse().expect("static filter string is valid"));
+	let env_filter = EnvFilter::builder()
+		.with_default_directive(Level::WARN.into())
+		.with_default_directive("bridge=info".parse().expect("static filter string is valid"))
+		.from_env_lossy();
 
 	let builder = SubscriberBuilder::default().with_env_filter(env_filter);
 
diff --git a/bridges/snowbridge/pallets/inbound-queue/Cargo.toml b/bridges/snowbridge/pallets/inbound-queue/Cargo.toml
index 1b08bb39b434..c0789940a9e8 100644
--- a/bridges/snowbridge/pallets/inbound-queue/Cargo.toml
+++ b/bridges/snowbridge/pallets/inbound-queue/Cargo.toml
@@ -20,8 +20,7 @@ codec = { features = ["derive"], workspace = true }
 scale-info = { features = ["derive"], workspace = true }
 hex-literal = { optional = true, workspace = true, default-features = true }
 log = { workspace = true }
-alloy-primitives = { features = ["rlp"], workspace = true }
-alloy-sol-types = { workspace = true }
+alloy-core = { workspace = true, features = ["sol-types"] }
 
 frame-benchmarking = { optional = true, workspace = true }
 frame-support = { workspace = true }
@@ -49,8 +48,7 @@ hex-literal = { workspace = true, default-features = true }
 [features]
 default = ["std"]
 std = [
-	"alloy-primitives/std",
-	"alloy-sol-types/std",
+	"alloy-core/std",
 	"codec/std",
 	"frame-benchmarking/std",
 	"frame-support/std",
@@ -83,6 +81,7 @@ runtime-benchmarks = [
 	"snowbridge-router-primitives/runtime-benchmarks",
 	"sp-runtime/runtime-benchmarks",
 	"xcm-executor/runtime-benchmarks",
+	"xcm/runtime-benchmarks",
 ]
 try-runtime = [
 	"frame-support/try-runtime",
diff --git a/bridges/snowbridge/pallets/inbound-queue/src/envelope.rs b/bridges/snowbridge/pallets/inbound-queue/src/envelope.rs
index 31a8992442d8..d213c8aad648 100644
--- a/bridges/snowbridge/pallets/inbound-queue/src/envelope.rs
+++ b/bridges/snowbridge/pallets/inbound-queue/src/envelope.rs
@@ -5,8 +5,7 @@ use snowbridge_core::{inbound::Log, ChannelId};
 use sp_core::{RuntimeDebug, H160, H256};
 use sp_std::prelude::*;
 
-use alloy_primitives::B256;
-use alloy_sol_types::{sol, SolEvent};
+use alloy_core::{primitives::B256, sol, sol_types::SolEvent};
 
 sol! {
 	event OutboundMessageAccepted(bytes32 indexed channel_id, uint64 nonce, bytes32 indexed message_id, bytes payload);
 }
@@ -36,7 +35,7 @@ impl TryFrom<&Log> for Envelope {
 	fn try_from(log: &Log) -> Result<Self, Self::Error> {
 		let topics: Vec<B256> = log.topics.iter().map(|x| B256::from_slice(x.as_ref())).collect();
 
-		let event = OutboundMessageAccepted::decode_log(topics, &log.data, true)
+		let event = OutboundMessageAccepted::decode_raw_log(topics, &log.data, true)
 			.map_err(|_| EnvelopeDecodeError)?;
 
 		Ok(Self {
@@ -44,7 +43,7 @@ impl TryFrom<&Log> for Envelope {
 			channel_id: ChannelId::from(event.channel_id.as_ref()),
 			nonce: event.nonce,
 			message_id: H256::from(event.message_id.as_ref()),
-			payload: event.payload,
+			payload: event.payload.into(),
 		})
 	}
 }
diff --git a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs
index 675d4b691593..eed0656e9ca7 100644
--- a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs
+++ b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs
@@ -248,20 +248,6 @@ impl inbound_queue::Config for Test {
 	type AssetTransactor = SuccessfulTransactor;
 }
 
-pub fn last_events(n: usize) -> Vec<RuntimeEvent> {
-	frame_system::Pallet::<Test>::events()
-		.into_iter()
-		.rev()
-		.take(n)
-		.rev()
-		.map(|e| e.event)
-		.collect()
-}
-
-pub fn expect_events(e: Vec<RuntimeEvent>) {
-	assert_eq!(last_events(e.len()), e);
-}
-
 pub fn setup() {
 	System::set_block_number(1);
 	Balances::mint_into(
diff --git a/bridges/snowbridge/pallets/inbound-queue/src/test.rs b/bridges/snowbridge/pallets/inbound-queue/src/test.rs
index 76d0b98e9eb4..aa99d63b4bf9 100644
--- a/bridges/snowbridge/pallets/inbound-queue/src/test.rs
+++ b/bridges/snowbridge/pallets/inbound-queue/src/test.rs
@@ -5,11 +5,11 @@ use super::*;
 use frame_support::{assert_noop, assert_ok};
 use hex_literal::hex;
 use snowbridge_core::{inbound::Proof, ChannelId};
-use sp_keyring::AccountKeyring as Keyring;
+use sp_keyring::Sr25519Keyring as Keyring;
 use sp_runtime::DispatchError;
 use sp_std::convert::From;
 
-use crate::{Error, Event as InboundQueueEvent};
+use crate::Error;
 
 use crate::mock::*;
 
@@ -35,17 +35,16 @@ fn test_submit_happy_path() {
 		assert_eq!(Balances::balance(&channel_sovereign), initial_fund);
 
 		assert_ok!(InboundQueue::submit(origin.clone(), message.clone()));
-		expect_events(vec![InboundQueueEvent::MessageReceived {
-			channel_id: hex!("c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539")
-				.into(),
-			nonce: 1,
-			message_id: [
-				11, 25, 133, 51, 23, 68, 111, 211, 132, 94, 254, 17, 194, 252, 198, 233, 10, 193,
-				156, 93, 72, 140, 65, 69, 79, 155, 154, 28, 141, 166, 171, 255,
-			],
-			fee_burned: 110000000000,
-		}
-		.into()]);
+
+		let events = frame_system::Pallet::<Test>::events();
+		assert!(
+			events.iter().any(|event| matches!(
+				event.event,
+				RuntimeEvent::InboundQueue(Event::MessageReceived { nonce, ..})
+				if nonce == 1
+			)),
+			"no event emitted."
+		);
 
 		let delivery_cost = InboundQueue::calculate_delivery_cost(message.encode().len() as u32);
 		assert!(
diff --git a/bridges/snowbridge/pallets/system/Cargo.toml b/bridges/snowbridge/pallets/system/Cargo.toml
index f1e749afb997..d8e124d73d14 100644
--- a/bridges/snowbridge/pallets/system/Cargo.toml
+++ b/bridges/snowbridge/pallets/system/Cargo.toml
@@ -71,6 +71,7 @@ runtime-benchmarks = [
 	"snowbridge-pallet-outbound-queue/runtime-benchmarks",
 	"sp-runtime/runtime-benchmarks",
 	"xcm-executor/runtime-benchmarks",
+	"xcm/runtime-benchmarks",
]
 try-runtime = [
 	"frame-support/try-runtime",
diff --git a/bridges/snowbridge/primitives/core/Cargo.toml b/bridges/snowbridge/primitives/core/Cargo.toml
index fa37c795b2d1..af002a5a965c 100644
--- a/bridges/snowbridge/primitives/core/Cargo.toml
+++ b/bridges/snowbridge/primitives/core/Cargo.toml
@@ -64,4 +64,5 @@ runtime-benchmarks = [
 	"sp-runtime/runtime-benchmarks",
 	"xcm-builder/runtime-benchmarks",
 	"xcm-executor/runtime-benchmarks",
+	"xcm/runtime-benchmarks",
 ]
diff --git a/bridges/snowbridge/primitives/router/Cargo.toml b/bridges/snowbridge/primitives/router/Cargo.toml
index ee8d481cec12..1f7f489c6b18 100644
--- a/bridges/snowbridge/primitives/router/Cargo.toml
+++ b/bridges/snowbridge/primitives/router/Cargo.toml
@@ -51,4 +51,5 @@ runtime-benchmarks = [
 	"snowbridge-core/runtime-benchmarks",
 	"sp-runtime/runtime-benchmarks",
 	"xcm-executor/runtime-benchmarks",
+	"xcm/runtime-benchmarks",
 ]
diff --git a/bridges/snowbridge/primitives/router/src/inbound/mod.rs b/bridges/snowbridge/primitives/router/src/inbound/mod.rs
index e03560f66e24..bc5d401cd4f7 100644
--- a/bridges/snowbridge/primitives/router/src/inbound/mod.rs
+++ b/bridges/snowbridge/primitives/router/src/inbound/mod.rs
@@ -279,6 +279,7 @@ where
 			// Call create_asset on foreign assets pallet.
 			Transact {
 				origin_kind: OriginKind::Xcm,
+				fallback_max_weight: Some(Weight::from_parts(400_000_000, 8_000)),
 				call: (
 					create_call_index,
 					asset_id,
@@ -357,7 +358,9 @@ where
 			}])),
 			// Perform a deposit reserve to send to destination chain.
 			DepositReserveAsset {
-				assets: Definite(vec![dest_para_fee_asset.clone(), asset].into()),
+				// Send over the assets and any unspent fees; the XCM delivery fee will be
+				// charged from here.
+				assets: Wild(AllCounted(2)),
 				dest: Location::new(1, [Parachain(dest_para_id)]),
 				xcm: vec![
 					// Buy execution on target.
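The mock's `build_congestion_message` earlier in this patch is the template for how a bridge hub reports congestion to a sibling chain's `pallet-xcm-bridge-hub-router`: an unpaid, system-to-system XCM program whose `Transact` wraps the router's `report_bridge_status` call. A minimal sketch of that program, assuming only the `xcm` crate; `congestion_report` and `encoded_call` are illustrative names, and the call bytes must be SCALE-encoded for the destination runtime (call indices differ per chain, hence the `#[codec(index = 57)]` in the mock):

use xcm::latest::prelude::*;

/// Build the XCM program a bridge hub could send to a sibling chain's router
/// instance to flip its congestion flag.
///
/// `encoded_call` is assumed to be the SCALE-encoded runtime call wrapping
/// `XcmBridgeHubRouterCall::report_bridge_status { bridge_id, is_congested }`.
fn congestion_report(encoded_call: Vec<u8>) -> Xcm<()> {
	Xcm(vec![
		// The router gates `report_bridge_status` by `BridgeHubOrigin`, so this
		// system-to-system message does not buy execution.
		UnpaidExecution { weight_limit: Unlimited, check_origin: None },
		Transact {
			origin_kind: OriginKind::Xcm,
			// New in this series: an optional fallback weight for destinations
			// that cannot weigh the call themselves.
			fallback_max_weight: None,
			call: encoded_call.into(),
		},
		// Surface a dispatch failure to the sender instead of failing silently.
		ExpectTransactStatus(MaybeErrorCode::Success),
	])
}

Keeping the suspend/resume signal as a real XCM message, rather than the plain storage write the old mock used, is what lets the congestion test above exercise the full path from `on_bridge_messages_delivered` down to the router's `is_congested` flag.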
diff --git a/bridges/snowbridge/runtime/runtime-common/Cargo.toml b/bridges/snowbridge/runtime/runtime-common/Cargo.toml
index d47cb3cb7101..514a4c186696 100644
--- a/bridges/snowbridge/runtime/runtime-common/Cargo.toml
+++ b/bridges/snowbridge/runtime/runtime-common/Cargo.toml
@@ -43,4 +43,5 @@ runtime-benchmarks = [
 	"snowbridge-core/runtime-benchmarks",
 	"xcm-builder/runtime-benchmarks",
 	"xcm-executor/runtime-benchmarks",
+	"xcm/runtime-benchmarks",
 ]
diff --git a/bridges/snowbridge/runtime/test-common/Cargo.toml b/bridges/snowbridge/runtime/test-common/Cargo.toml
index 6f8e586bf5ff..cc1ed1288ca0 100644
--- a/bridges/snowbridge/runtime/test-common/Cargo.toml
+++ b/bridges/snowbridge/runtime/test-common/Cargo.toml
@@ -6,6 +6,8 @@ authors = ["Snowfork <contact@snowfork.com>"]
 edition.workspace = true
 license = "Apache-2.0"
 categories = ["cryptography::cryptocurrencies"]
+homepage.workspace = true
+repository.workspace = true
 
 [lints]
 workspace = true
@@ -90,5 +92,6 @@ runtime-benchmarks = [
 	"snowbridge-pallet-system/runtime-benchmarks",
 	"sp-runtime/runtime-benchmarks",
 	"xcm-executor/runtime-benchmarks",
+	"xcm/runtime-benchmarks",
 ]
 fast-runtime = []
diff --git a/bridges/snowbridge/runtime/test-common/src/lib.rs b/bridges/snowbridge/runtime/test-common/src/lib.rs
index dca5062ab310..5441dd822cac 100644
--- a/bridges/snowbridge/runtime/test-common/src/lib.rs
+++ b/bridges/snowbridge/runtime/test-common/src/lib.rs
@@ -13,7 +13,7 @@ use parachains_runtimes_test_utils::{
 use snowbridge_core::{ChannelId, ParaId};
 use snowbridge_pallet_ethereum_client_fixtures::*;
 use sp_core::{Get, H160, U256};
-use sp_keyring::AccountKeyring::*;
+use sp_keyring::Sr25519Keyring::*;
 use sp_runtime::{traits::Header, AccountId32, DigestItem, SaturatedConversion, Saturating};
 use xcm::latest::prelude::*;
 use xcm_executor::XcmExecutor;
@@ -431,7 +431,7 @@ pub fn ethereum_extrinsic<Runtime>(
 	collator_session_key: CollatorSessionKeys<Runtime>,
 	runtime_para_id: u32,
 	construct_and_apply_extrinsic: fn(
-		sp_keyring::AccountKeyring,
+		sp_keyring::Sr25519Keyring,
 		<Runtime as frame_system::Config>::RuntimeCall,
 	) -> sp_runtime::DispatchOutcome,
 ) where
@@ -567,7 +567,7 @@ pub fn ethereum_to_polkadot_message_extrinsics_work<Runtime>(
 	collator_session_key: CollatorSessionKeys<Runtime>,
 	runtime_para_id: u32,
 	construct_and_apply_extrinsic: fn(
-		sp_keyring::AccountKeyring,
+		sp_keyring::Sr25519Keyring,
 		<Runtime as frame_system::Config>::RuntimeCall,
 	) -> sp_runtime::DispatchOutcome,
 ) where
diff --git a/cumulus/client/cli/Cargo.toml b/cumulus/client/cli/Cargo.toml
index 9b6f6b73960b..198f9428f1dd 100644
--- a/cumulus/client/cli/Cargo.toml
+++ b/cumulus/client/cli/Cargo.toml
@@ -5,6 +5,8 @@ authors.workspace = true
 edition.workspace = true
 description = "Parachain node CLI utilities."
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+homepage.workspace = true
+repository.workspace = true
 
 [lints]
 workspace = true
diff --git a/cumulus/client/collator/Cargo.toml b/cumulus/client/collator/Cargo.toml
index 6ebde0c2c653..83a3f2661e7a 100644
--- a/cumulus/client/collator/Cargo.toml
+++ b/cumulus/client/collator/Cargo.toml
@@ -5,6 +5,8 @@ authors.workspace = true
 edition.workspace = true
 description = "Common node-side functionality and glue code to collate parachain blocks."
license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/client/consensus/aura/Cargo.toml b/cumulus/client/consensus/aura/Cargo.toml index 0bb2de6bb9b8..33f24e30ccfb 100644 --- a/cumulus/client/consensus/aura/Cargo.toml +++ b/cumulus/client/consensus/aura/Cargo.toml @@ -5,6 +5,8 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -33,6 +35,7 @@ sp-blockchain = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } sp-consensus-aura = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs new file mode 100644 index 000000000000..9c53da6a6b7d --- /dev/null +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs @@ -0,0 +1,144 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +use futures::{stream::FusedStream, StreamExt}; +use sc_consensus::{BlockImport, StateAction}; +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use sp_api::{ApiExt, CallApiAt, CallContext, Core, ProvideRuntimeApi, StorageProof}; +use sp_runtime::traits::{Block as BlockT, Header as _}; +use sp_trie::proof_size_extension::ProofSizeExt; +use std::sync::Arc; + +/// Handle for receiving the block and the storage proof from the [`SlotBasedBlockImport`]. +/// +/// This handle should be passed to [`Params`](super::Params) or can also be dropped if the node is +/// not running as collator. +pub struct SlotBasedBlockImportHandle { + receiver: TracingUnboundedReceiver<(Block, StorageProof)>, +} + +impl SlotBasedBlockImportHandle { + /// Returns the next item. + /// + /// The future will never return when the internal channel is closed. + pub async fn next(&mut self) -> (Block, StorageProof) { + loop { + if self.receiver.is_terminated() { + futures::pending!() + } else if let Some(res) = self.receiver.next().await { + return res + } + } + } +} + +/// Special block import for the slot based collator. +pub struct SlotBasedBlockImport { + inner: BI, + client: Arc, + sender: TracingUnboundedSender<(Block, StorageProof)>, +} + +impl SlotBasedBlockImport { + /// Create a new instance. 
+	///
+	/// The returned [`SlotBasedBlockImportHandle`] needs to be passed to the
+	/// [`Params`](super::Params), so that this block import instance can communicate with the
+	/// collation task. If the node is not running as a collator, just dropping the handle is fine.
+	pub fn new(inner: BI, client: Arc<Client>) -> (Self, SlotBasedBlockImportHandle<Block>) {
+		let (sender, receiver) = tracing_unbounded("SlotBasedBlockImportChannel", 1000);
+
+		(Self { sender, client, inner }, SlotBasedBlockImportHandle { receiver })
+	}
+}
+
+impl<Block, BI: Clone, Client> Clone for SlotBasedBlockImport<Block, BI, Client> {
+	fn clone(&self) -> Self {
+		Self { inner: self.inner.clone(), client: self.client.clone(), sender: self.sender.clone() }
+	}
+}
+
+#[async_trait::async_trait]
+impl<Block, BI, Client> BlockImport<Block> for SlotBasedBlockImport<Block, BI, Client>
+where
+	Block: BlockT,
+	BI: BlockImport<Block> + Send + Sync,
+	BI::Error: Into<sp_consensus::Error>,
+	Client: ProvideRuntimeApi<Block> + CallApiAt<Block> + Send + Sync,
+	Client::StateBackend: Send,
+	Client::Api: Core<Block>,
+{
+	type Error = sp_consensus::Error;
+
+	async fn check_block(
+		&self,
+		block: sc_consensus::BlockCheckParams<Block>,
+	) -> Result<sc_consensus::ImportResult, Self::Error> {
+		self.inner.check_block(block).await.map_err(Into::into)
+	}
+
+	async fn import_block(
+		&self,
+		mut params: sc_consensus::BlockImportParams<Block>,
+	) -> Result<sc_consensus::ImportResult, Self::Error> {
+		// If the channel exists and it is required to execute the block, we will execute the block
+		// here. This is done to collect the storage proof and, to prevent re-execution, we push
+		// the resulting state changes down to the import pipeline. `StateAction::ApplyChanges` is
+		// ignored, because it either means that the node produced the block itself or the block
+		// was imported via state sync.
+		if !self.sender.is_closed() && !matches!(params.state_action, StateAction::ApplyChanges(_))
+		{
+			let mut runtime_api = self.client.runtime_api();
+
+			runtime_api.set_call_context(CallContext::Onchain);
+
+			runtime_api.record_proof();
+			let recorder = runtime_api
+				.proof_recorder()
+				.expect("Proof recording is enabled in the line above; qed.");
+			runtime_api.register_extension(ProofSizeExt::new(recorder));
+
+			let parent_hash = *params.header.parent_hash();
+
+			let block = Block::new(params.header.clone(), params.body.clone().unwrap_or_default());
+
+			runtime_api
+				.execute_block(parent_hash, block.clone())
+				.map_err(|e| Box::new(e) as Box<_>)?;
+
+			let storage_proof =
+				runtime_api.extract_proof().expect("Proof recording was enabled above; qed");
+
+			let state = self.client.state_at(parent_hash).map_err(|e| Box::new(e) as Box<_>)?;
+			let gen_storage_changes = runtime_api
+				.into_storage_changes(&state, parent_hash)
+				.map_err(sp_consensus::Error::ChainLookup)?;
+
+			if params.header.state_root() != &gen_storage_changes.transaction_storage_root {
+				return Err(sp_consensus::Error::Other(Box::new(
+					sp_blockchain::Error::InvalidStateRoot,
+				)))
+			}
+
+			params.state_action = StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(
+				gen_storage_changes,
+			));
+
+			let _ = self.sender.unbounded_send((block, storage_proof));
+		}
+
+		self.inner.import_block(params).await.map_err(Into::into)
+	}
+}
diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs
index 5b8151f6302c..abaeb8319a40 100644
--- a/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs
+++ b/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs
@@ -47,6 +47,8 @@ pub struct Params<Block: BlockT, RClient, CS> {
 	pub collator_service: CS,
 	/// Receiver channel for communication with the block builder task.
 	pub collator_receiver: TracingUnboundedReceiver<CollatorMessage<Block>>,
+	/// The handle from the special slot based block import.
+	pub block_import_handle: super::SlotBasedBlockImportHandle<Block>,
 }
 
 /// Asynchronously executes the collation task for a parachain.
@@ -55,28 +57,49 @@ pub struct Params<Block: BlockT, RClient, CS> {
 /// collations to the relay chain. It listens for new best relay chain block notifications and
 /// handles collator messages. If our parachain is scheduled on a core and we have a candidate,
 /// the task will build a collation and send it to the relay chain.
-pub async fn run_collation_task<Block, RClient, CS>(mut params: Params<Block, RClient, CS>)
-where
+pub async fn run_collation_task<Block, RClient, CS>(
+	Params {
+		relay_client,
+		collator_key,
+		para_id,
+		reinitialize,
+		collator_service,
+		mut collator_receiver,
+		mut block_import_handle,
+	}: Params<Block, RClient, CS>,
+) where
 	Block: BlockT,
 	CS: CollatorServiceInterface<Block> + Send + Sync + 'static,
 	RClient: RelayChainInterface + Clone + 'static,
 {
-	let Ok(mut overseer_handle) = params.relay_client.overseer_handle() else {
+	let Ok(mut overseer_handle) = relay_client.overseer_handle() else {
 		tracing::error!(target: LOG_TARGET, "Failed to get overseer handle.");
 		return
 	};
 
 	cumulus_client_collator::initialize_collator_subsystems(
 		&mut overseer_handle,
-		params.collator_key,
-		params.para_id,
-		params.reinitialize,
+		collator_key,
+		para_id,
+		reinitialize,
 	)
 	.await;
 
-	let collator_service = params.collator_service;
-	while let Some(collator_message) = params.collator_receiver.next().await {
-		handle_collation_message(collator_message, &collator_service, &mut overseer_handle).await;
+	loop {
+		futures::select! {
+			collator_message = collator_receiver.next() => {
+				let Some(message) = collator_message else {
+					return;
+				};
+
+				handle_collation_message(message, &collator_service, &mut overseer_handle).await;
+			},
+			block_import_msg = block_import_handle.next().fuse() => {
+				// TODO: Implement me.
+				// Issue: https://github.com/paritytech/polkadot-sdk/issues/6495
+				let _ = block_import_msg;
+			}
+		}
+	}
 }
diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs
index 7453d3c89d08..09afa18e6fbb 100644
--- a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs
+++ b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs
@@ -28,6 +28,7 @@
 //! during the relay chain block. After the block is built, the block builder task sends it to
 //! the collation task which compresses it and submits it to the collation-generation subsystem.
+use self::{block_builder_task::run_block_builder, collation_task::run_collation_task};
 use codec::Codec;
 use consensus_common::ParachainCandidate;
 use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface;
@@ -36,32 +37,31 @@ use cumulus_client_consensus_proposer::ProposerInterface;
 use cumulus_primitives_aura::AuraUnincludedSegmentApi;
 use cumulus_primitives_core::GetCoreSelectorApi;
 use cumulus_relay_chain_interface::RelayChainInterface;
+use futures::FutureExt;
 use polkadot_primitives::{
 	CollatorPair, CoreIndex, Hash as RelayHash, Id as ParaId, ValidationCodeHash,
 };
-
 use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf, UsageProvider};
 use sc_consensus::BlockImport;
 use sc_utils::mpsc::tracing_unbounded;
-
 use sp_api::ProvideRuntimeApi;
 use sp_application_crypto::AppPublic;
 use sp_blockchain::HeaderBackend;
 use sp_consensus_aura::AuraApi;
-use sp_core::crypto::Pair;
+use sp_core::{crypto::Pair, traits::SpawnNamed};
 use sp_inherents::CreateInherentDataProviders;
 use sp_keystore::KeystorePtr;
 use sp_runtime::traits::{Block as BlockT, Member};
-
 use std::{sync::Arc, time::Duration};
 
-use self::{block_builder_task::run_block_builder, collation_task::run_collation_task};
+pub use block_import::{SlotBasedBlockImport, SlotBasedBlockImportHandle};
 
 mod block_builder_task;
+mod block_import;
 mod collation_task;
 
 /// Parameters for [`run`].
-pub struct Params<BI, CIDP, Client, Backend, RClient, CHP, Proposer, CS> {
+pub struct Params<Block, BI, CIDP, Client, Backend, RClient, CHP, Proposer, CS, Spawner> {
 	/// Inherent data providers. Only non-consensus inherent data should be provided, i.e.
 	/// the timestamp, slot, and paras inherents should be omitted, as they are set by this
 	/// collator.
@@ -93,13 +93,33 @@ pub struct Params<Block, BI, CIDP, Client, Backend, RClient, CHP, Proposer, CS,
 	/// Drift slots by a fixed duration. This can be used to create more preferable authoring
 	/// timings.
 	pub slot_drift: Duration,
+	/// The handle returned by [`SlotBasedBlockImport`].
+	pub block_import_handle: SlotBasedBlockImportHandle<Block>,
+	/// Spawner for spawning futures.
+	pub spawner: Spawner,
 }
 
 /// Run aura-based block building and collation task.
-pub fn run<Block, P, BI, CIDP, Client, Backend, RClient, CHP, Proposer, CS>(
-	params: Params<BI, CIDP, Client, Backend, RClient, CHP, Proposer, CS>,
-) -> (impl futures::Future<Output = ()>, impl futures::Future<Output = ()>)
-where
+pub fn run<Block, P, BI, CIDP, Client, Backend, RClient, CHP, Proposer, CS, Spawner>(
+	Params {
+		create_inherent_data_providers,
+		block_import,
+		para_client,
+		para_backend,
+		relay_client,
+		code_hash_provider,
+		keystore,
+		collator_key,
+		para_id,
+		proposer,
+		collator_service,
+		authoring_duration,
+		reinitialize,
+		slot_drift,
+		block_import_handle,
+		spawner,
+	}: Params<Block, BI, CIDP, Client, Backend, RClient, CHP, Proposer, CS, Spawner>,
+) where
 	Block: BlockT,
 	Client: ProvideRuntimeApi<Block>
 		+ BlockOf
@@ -123,39 +143,50 @@ where
 	P: Pair + 'static,
 	P::Public: AppPublic + Member + Codec,
 	P::Signature: TryFrom<Vec<u8>> + Member + Codec,
+	Spawner: SpawnNamed,
 {
 	let (tx, rx) = tracing_unbounded("mpsc_builder_to_collator", 100);
 
 	let collator_task_params = collation_task::Params {
-		relay_client: params.relay_client.clone(),
-		collator_key: params.collator_key,
-		para_id: params.para_id,
-		reinitialize: params.reinitialize,
-		collator_service: params.collator_service.clone(),
+		relay_client: relay_client.clone(),
+		collator_key,
+		para_id,
+		reinitialize,
+		collator_service: collator_service.clone(),
 		collator_receiver: rx,
+		block_import_handle,
 	};
 
	let collation_task_fut = run_collation_task::<Block, _, _>(collator_task_params);
 
 	let block_builder_params = block_builder_task::BuilderTaskParams {
-		create_inherent_data_providers: params.create_inherent_data_providers,
-		block_import: params.block_import,
-		para_client: params.para_client,
-		para_backend: params.para_backend,
-		relay_client: params.relay_client,
-		code_hash_provider: params.code_hash_provider,
-		keystore: params.keystore,
-		para_id: params.para_id,
-		proposer: params.proposer,
-		collator_service: params.collator_service,
-		authoring_duration: params.authoring_duration,
+		create_inherent_data_providers,
+		block_import,
+		para_client,
+		para_backend,
+		relay_client,
+		code_hash_provider,
+		keystore,
+		para_id,
+		proposer,
+		collator_service,
+		authoring_duration,
 		collator_sender: tx,
-		slot_drift: params.slot_drift,
+		slot_drift,
 	};
 
 	let block_builder_fut =
 		run_block_builder::<Block, P, _, _, _, _, _, _, _, _>(block_builder_params);
 
-	(collation_task_fut, block_builder_fut)
+	spawner.spawn_blocking(
+		"slot-based-block-builder",
+		Some("slot-based-collator"),
+		block_builder_fut.boxed(),
+	);
+	spawner.spawn_blocking(
+		"slot-based-collation",
+		Some("slot-based-collator"),
+		collation_task_fut.boxed(),
+	);
 }
 
 /// Message to be sent from the block builder to the collation task.
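The `SlotBasedBlockImportHandle::next` contract above — never resolve once the channel is closed — is what allows the collation task to `futures::select!` over collator messages and imported blocks without spinning on a dead channel. A minimal, self-contained sketch of the same pattern, assuming the `futures` and `sc_utils` crates; `next_or_pending` is an illustrative name, not an API from this change:

use futures::{stream::FusedStream, StreamExt};
use sc_utils::mpsc::TracingUnboundedReceiver;

/// Yield the next item, or park forever once all senders are gone,
/// instead of resolving with `None`.
async fn next_or_pending<T>(receiver: &mut TracingUnboundedReceiver<T>) -> T {
	loop {
		if receiver.is_terminated() {
			// No senders left: suspend this future indefinitely so a
			// surrounding `select!` keeps servicing its other arms.
			futures::pending!()
		} else if let Some(item) = receiver.next().await {
			return item
		}
	}
}

If `next()` returned `None` instead, the corresponding `select!` arm in `run_collation_task` would complete on every iteration once the sender side was dropped, busy-looping the task; parking the future keeps the other arm (the collator receiver) responsive.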
diff --git a/cumulus/client/consensus/common/Cargo.toml b/cumulus/client/consensus/common/Cargo.toml
index 4bc2f1d1e600..0f532a2101c4 100644
--- a/cumulus/client/consensus/common/Cargo.toml
+++ b/cumulus/client/consensus/common/Cargo.toml
@@ -5,6 +5,8 @@ version = "0.7.0"
 authors.workspace = true
 edition.workspace = true
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+homepage.workspace = true
+repository.workspace = true
 
 [lints]
 workspace = true
diff --git a/cumulus/client/consensus/proposer/Cargo.toml b/cumulus/client/consensus/proposer/Cargo.toml
index bb760ae03f4d..e391481bc445 100644
--- a/cumulus/client/consensus/proposer/Cargo.toml
+++ b/cumulus/client/consensus/proposer/Cargo.toml
@@ -5,6 +5,8 @@ version = "0.7.0"
 authors.workspace = true
 edition.workspace = true
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+homepage.workspace = true
+repository.workspace = true
 
 [lints]
 workspace = true
diff --git a/cumulus/client/consensus/relay-chain/Cargo.toml b/cumulus/client/consensus/relay-chain/Cargo.toml
index f3ee6fc2f7d2..7f0f4333c961 100644
--- a/cumulus/client/consensus/relay-chain/Cargo.toml
+++ b/cumulus/client/consensus/relay-chain/Cargo.toml
@@ -5,6 +5,8 @@ version = "0.7.0"
 authors.workspace = true
 edition.workspace = true
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+homepage.workspace = true
+repository.workspace = true
 
 [lints]
 workspace = true
diff --git a/cumulus/client/network/Cargo.toml b/cumulus/client/network/Cargo.toml
index bc67678eedeb..b78df8d73eae 100644
--- a/cumulus/client/network/Cargo.toml
+++ b/cumulus/client/network/Cargo.toml
@@ -5,6 +5,8 @@ authors.workspace = true
 description = "Cumulus-specific networking protocol"
 edition.workspace = true
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+homepage.workspace = true
+repository.workspace = true
 
 [lints]
 workspace = true
diff --git a/cumulus/client/parachain-inherent/Cargo.toml b/cumulus/client/parachain-inherent/Cargo.toml
index 0d82cf648743..4f53e2bc1bc2 100644
--- a/cumulus/client/parachain-inherent/Cargo.toml
+++ b/cumulus/client/parachain-inherent/Cargo.toml
@@ -5,6 +5,8 @@ authors.workspace = true
 edition.workspace = true
 description = "Inherent that needs to be present in every parachain block. Contains messages and a relay chain storage-proof."
 license = "Apache-2.0"
+homepage.workspace = true
+repository.workspace = true
 
 [dependencies]
 async-trait = { workspace = true }
diff --git a/cumulus/client/parachain-inherent/src/mock.rs b/cumulus/client/parachain-inherent/src/mock.rs
index 950cba2aaa7d..e08aca932564 100644
--- a/cumulus/client/parachain-inherent/src/mock.rs
+++ b/cumulus/client/parachain-inherent/src/mock.rs
@@ -17,17 +17,17 @@ use crate::{ParachainInherentData, INHERENT_IDENTIFIER};
 use codec::Decode;
 use cumulus_primitives_core::{
-	relay_chain, InboundDownwardMessage, InboundHrmpMessage, ParaId, PersistedValidationData,
+	relay_chain, relay_chain::UpgradeGoAhead, InboundDownwardMessage, InboundHrmpMessage, ParaId,
+	PersistedValidationData,
 };
 use cumulus_primitives_parachain_inherent::MessageQueueChain;
+use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder;
 use sc_client_api::{Backend, StorageProvider};
 use sp_crypto_hashing::twox_128;
 use sp_inherents::{InherentData, InherentDataProvider};
 use sp_runtime::traits::Block;
 use std::collections::BTreeMap;
 
-use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder;
-
 /// Relay chain slot duration, in milliseconds.
 pub const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000;
@@ -68,10 +68,12 @@ pub struct MockValidationDataInherentDataProvider {
 	pub xcm_config: MockXcmConfig,
 	/// Inbound downward XCM messages to be injected into the block.
 	pub raw_downward_messages: Vec<Vec<u8>>,
-	// Inbound Horizontal messages sorted by channel.
+	/// Inbound horizontal messages sorted by channel.
 	pub raw_horizontal_messages: Vec<(ParaId, Vec<u8>)>,
-	// Additional key-value pairs that should be injected.
+	/// Additional key-value pairs that should be injected.
 	pub additional_key_values: Option<Vec<(Vec<u8>, Vec<u8>)>>,
+	/// Whether an upgrade go-ahead signal should be set.
+	pub upgrade_go_ahead: Option<UpgradeGoAhead>,
 }
 
 /// Something that can generate randomness.
@@ -176,6 +178,7 @@ impl<R: Send + Sync + GenerateRandomness<u64>> InherentDataProvider
 		sproof_builder.current_slot =
 			((relay_parent_number / RELAY_CHAIN_SLOT_DURATION_MILLIS) as u64).into();
 
+		sproof_builder.upgrade_go_ahead = self.upgrade_go_ahead;
 		// Process the downward messages and set up the correct head
 		let mut downward_messages = Vec::new();
 		let mut dmq_mqc = MessageQueueChain::new(self.xcm_config.starting_dmq_mqc_head);
diff --git a/cumulus/client/pov-recovery/Cargo.toml b/cumulus/client/pov-recovery/Cargo.toml
index 3127dd26fcaa..762837e0bb11 100644
--- a/cumulus/client/pov-recovery/Cargo.toml
+++ b/cumulus/client/pov-recovery/Cargo.toml
@@ -5,6 +5,8 @@ authors.workspace = true
 description = "Parachain PoV recovery"
 edition.workspace = true
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+homepage.workspace = true
+repository.workspace = true
 
 [lints]
 workspace = true
diff --git a/cumulus/client/relay-chain-inprocess-interface/Cargo.toml b/cumulus/client/relay-chain-inprocess-interface/Cargo.toml
index 6f1b74191be7..9e6e8da929bb 100644
--- a/cumulus/client/relay-chain-inprocess-interface/Cargo.toml
+++ b/cumulus/client/relay-chain-inprocess-interface/Cargo.toml
@@ -5,6 +5,8 @@ version = "0.7.0"
 edition.workspace = true
 description = "Implementation of the RelayChainInterface trait for Polkadot full-nodes."
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+homepage.workspace = true
+repository.workspace = true
 
 [lints]
 workspace = true
diff --git a/cumulus/client/relay-chain-interface/Cargo.toml b/cumulus/client/relay-chain-interface/Cargo.toml
index a496fab050dd..2b9e72bbeca6 100644
--- a/cumulus/client/relay-chain-interface/Cargo.toml
+++ b/cumulus/client/relay-chain-interface/Cargo.toml
@@ -5,6 +5,8 @@ version = "0.7.0"
 edition.workspace = true
 description = "Common interface for different relay chain datasources."
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+homepage.workspace = true
+repository.workspace = true
 
 [lints]
 workspace = true
diff --git a/cumulus/client/relay-chain-minimal-node/Cargo.toml b/cumulus/client/relay-chain-minimal-node/Cargo.toml
index 95ecadc8bd06..0fad188bb1ab 100644
--- a/cumulus/client/relay-chain-minimal-node/Cargo.toml
+++ b/cumulus/client/relay-chain-minimal-node/Cargo.toml
@@ -5,6 +5,8 @@ version = "0.7.0"
 edition.workspace = true
 description = "Minimal node implementation to be used in tandem with RPC or light-client mode."
license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/client/relay-chain-rpc-interface/Cargo.toml b/cumulus/client/relay-chain-rpc-interface/Cargo.toml index fb4cb4ceed4e..162f5ad0e9e8 100644 --- a/cumulus/client/relay-chain-rpc-interface/Cargo.toml +++ b/cumulus/client/relay-chain-rpc-interface/Cargo.toml @@ -5,6 +5,8 @@ version = "0.7.0" edition.workspace = true description = "Implementation of the RelayChainInterface trait that connects to a remote RPC-node." license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/client/service/Cargo.toml b/cumulus/client/service/Cargo.toml index 8e9e41ca89dc..193283648f19 100644 --- a/cumulus/client/service/Cargo.toml +++ b/cumulus/client/service/Cargo.toml @@ -5,12 +5,15 @@ authors.workspace = true edition.workspace = true description = "Common functions used to assemble the components of a parachain node." license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] futures = { workspace = true } +futures-timer = { workspace = true } # Substrate sc-client-api = { workspace = true, default-features = true } diff --git a/cumulus/pallets/aura-ext/Cargo.toml b/cumulus/pallets/aura-ext/Cargo.toml index c08148928b7c..fcda79f1d5c1 100644 --- a/cumulus/pallets/aura-ext/Cargo.toml +++ b/cumulus/pallets/aura-ext/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "AURA consensus extension pallet for parachains" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/pallets/dmp-queue/Cargo.toml b/cumulus/pallets/dmp-queue/Cargo.toml index 936526290d93..ae85a108fe72 100644 --- a/cumulus/pallets/dmp-queue/Cargo.toml +++ b/cumulus/pallets/dmp-queue/Cargo.toml @@ -56,6 +56,7 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index 3cb0394c4b95..c911f8531da2 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Base pallet for cumulus-based parachains" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -36,7 +38,6 @@ sp-version = { workspace = true } # Polkadot polkadot-parachain-primitives = { features = ["wasm-api"], workspace = true } polkadot-runtime-parachains = { workspace = true } -polkadot-runtime-common = { optional = true, workspace = true } xcm = { workspace = true } xcm-builder = { workspace = true } @@ -82,7 +83,6 @@ std = [ "log/std", "pallet-message-queue/std", "polkadot-parachain-primitives/std", - "polkadot-runtime-common/std", "polkadot-runtime-parachains/std", "scale-info/std", "sp-core/std", @@ -107,17 +107,16 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "pallet-message-queue/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", - "polkadot-runtime-common/runtime-benchmarks", "polkadot-runtime-parachains/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", + 
"xcm/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", "pallet-message-queue/try-runtime", - "polkadot-runtime-common?/try-runtime", "polkadot-runtime-parachains/try-runtime", "sp-runtime/try-runtime", ] diff --git a/cumulus/pallets/parachain-system/proc-macro/Cargo.toml b/cumulus/pallets/parachain-system/proc-macro/Cargo.toml index da6f0fd03efb..629818f9c4cc 100644 --- a/cumulus/pallets/parachain-system/proc-macro/Cargo.toml +++ b/cumulus/pallets/parachain-system/proc-macro/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Proc macros provided by the parachain-system pallet" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 39fc8321a072..0fa759357f65 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -1636,7 +1636,7 @@ impl InspectMessageQueues for Pallet { } #[cfg(feature = "runtime-benchmarks")] -impl polkadot_runtime_common::xcm_sender::EnsureForParachain for Pallet { +impl polkadot_runtime_parachains::EnsureForParachain for Pallet { fn ensure(para_id: ParaId) { if let ChannelStatus::Closed = Self::get_channel_status(para_id) { Self::open_outbound_hrmp_channel_for_benchmarks_or_tests(para_id) diff --git a/cumulus/pallets/solo-to-para/Cargo.toml b/cumulus/pallets/solo-to-para/Cargo.toml index 5fd1939e93a0..2088361bf11a 100644 --- a/cumulus/pallets/solo-to-para/Cargo.toml +++ b/cumulus/pallets/solo-to-para/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Adds functionality to migrate from a Solo to a Parachain" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/pallets/xcm/Cargo.toml b/cumulus/pallets/xcm/Cargo.toml index 35d7a083b061..ff9be866d48f 100644 --- a/cumulus/pallets/xcm/Cargo.toml +++ b/cumulus/pallets/xcm/Cargo.toml @@ -5,6 +5,8 @@ name = "cumulus-pallet-xcm" version = "0.7.0" license = "Apache-2.0" description = "Pallet for stuff specific to parachains' usage of XCM" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/pallets/xcmp-queue/Cargo.toml b/cumulus/pallets/xcmp-queue/Cargo.toml index 9c7470eda6da..432be3027e05 100644 --- a/cumulus/pallets/xcmp-queue/Cargo.toml +++ b/cumulus/pallets/xcmp-queue/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Pallet to queue outbound and inbound XCMP messages." 
license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -85,6 +87,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ "cumulus-pallet-parachain-system/try-runtime", diff --git a/cumulus/parachains/common/Cargo.toml b/cumulus/parachains/common/Cargo.toml index 6d436bdf799a..ae4d7fc1d115 100644 --- a/cumulus/parachains/common/Cargo.toml +++ b/cumulus/parachains/common/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Logic which is common to all parachain runtimes" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -90,4 +92,5 @@ runtime-benchmarks = [ "polkadot-primitives/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm/runtime-benchmarks", ] diff --git a/cumulus/parachains/integration-tests/emulated/common/Cargo.toml b/cumulus/parachains/integration-tests/emulated/common/Cargo.toml index 23edaf6bfe65..8282d12d317f 100644 --- a/cumulus/parachains/integration-tests/emulated/common/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/common/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license = "Apache-2.0" description = "Common resources for integration testing with xcm-emulator" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs index c0d42cf2758e..9dad323aa19c 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs @@ -370,6 +370,8 @@ macro_rules! impl_send_transact_helpers_for_relay_chain { let destination: $crate::impls::Location = ::child_location_of(recipient); let xcm = $crate::impls::xcm_transact_unpaid_execution(call, $crate::impls::OriginKind::Superuser); + $crate::impls::dmp::Pallet::<::Runtime>::make_parachain_reachable(recipient); + // Send XCM `Transact` $crate::impls::assert_ok!(]>::XcmPallet::send( root_origin, diff --git a/cumulus/parachains/integration-tests/emulated/common/src/macros.rs b/cumulus/parachains/integration-tests/emulated/common/src/macros.rs index b776cafb2545..cd2b41e5198f 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/macros.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/macros.rs @@ -23,6 +23,7 @@ pub use pallet_message_queue; pub use pallet_xcm; // Polkadot +pub use polkadot_runtime_parachains::dmp::Pallet as Dmp; pub use xcm::{ prelude::{ AccountId32, All, Asset, AssetId, BuyExecution, DepositAsset, ExpectTransactStatus, @@ -156,6 +157,8 @@ macro_rules! 
 			// Send XCM message from Relay
 			<$sender_relay>::execute_with(|| {
+				$crate::macros::Dmp::<<$sender_relay as $crate::macros::Chain>::Runtime>::make_parachain_reachable(<$receiver_para>::para_id());
+
 				assert_ok!(<$sender_relay as [<$sender_relay Pallet>]>::XcmPallet::limited_teleport_assets(
 					origin.clone(),
 					bx!(para_destination.clone().into()),
diff --git a/cumulus/parachains/integration-tests/emulated/common/src/xcm_helpers.rs b/cumulus/parachains/integration-tests/emulated/common/src/xcm_helpers.rs
index 9125c976525e..380f4983ad98 100644
--- a/cumulus/parachains/integration-tests/emulated/common/src/xcm_helpers.rs
+++ b/cumulus/parachains/integration-tests/emulated/common/src/xcm_helpers.rs
@@ -31,7 +31,7 @@ pub fn xcm_transact_paid_execution(
 	VersionedXcm::from(Xcm(vec![
 		WithdrawAsset(fees.clone().into()),
 		BuyExecution { fees, weight_limit },
-		Transact { origin_kind, call },
+		Transact { origin_kind, call, fallback_max_weight: None },
 		RefundSurplus,
 		DepositAsset {
 			assets: All.into(),
@@ -53,7 +53,7 @@ pub fn xcm_transact_unpaid_execution(
 	VersionedXcm::from(Xcm(vec![
 		UnpaidExecution { weight_limit, check_origin },
-		Transact { origin_kind, call },
+		Transact { origin_kind, call, fallback_max_weight: None },
 	]))
 }
diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/hybrid_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/hybrid_transfers.rs
index baec7d20f415..fb95c361f089 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/hybrid_transfers.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/hybrid_transfers.rs
@@ -13,6 +13,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+use rococo_system_emulated_network::rococo_emulated_chain::rococo_runtime::Dmp;
+
 use super::reserve_transfer::*;
 use crate::{
 	imports::*,
@@ -777,6 +779,8 @@ fn transfer_native_asset_from_relay_to_para_through_asset_hub() {
 			xcm: xcm_on_final_dest,
 		}]);
 
+		Dmp::make_parachain_reachable(AssetHubRococo::para_id());
+
 		// First leg is a teleport, from there a local-reserve-transfer to final dest
 		<Rococo as RococoPallet>::XcmPallet::transfer_assets_using_type_and_then(
 			t.signed_origin,
diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs
index 698ef2c9e792..407a581afeb9 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs
@@ -14,6 +14,7 @@
 // limitations under the License.
 
 use crate::imports::*;
+use rococo_system_emulated_network::rococo_emulated_chain::rococo_runtime::Dmp;
 use sp_core::{crypto::get_public_from_string_or_panic, sr25519};
 
 fn relay_to_para_sender_assertions(t: RelayToParaTest) {
@@ -115,7 +116,7 @@ pub fn system_para_to_para_sender_assertions(t: SystemParaToParaTest) {
 	assert_expected_events!(
 		AssetHubRococo,
 		vec![
-			// Transport fees are paid
+			// Delivery fees are paid
 			RuntimeEvent::PolkadotXcm(pallet_xcm::Event::FeesPaid { .. }) => {},
 		]
 	);
@@ -274,7 +275,7 @@ fn system_para_to_para_assets_sender_assertions(t: SystemParaToParaTest) {
 					t.args.dest.clone()
 				),
 			},
-			// Transport fees are paid
+			// Delivery fees are paid
 			RuntimeEvent::PolkadotXcm(
 				pallet_xcm::Event::FeesPaid { .. }
 			) => {},
@@ -305,7 +306,7 @@ fn para_to_system_para_assets_sender_assertions(t: ParaToSystemParaTest) {
 				owner: *owner == t.sender.account_id,
 				balance: *balance == t.args.amount,
 			},
-			// Transport fees are paid
+			// Delivery fees are paid
 			RuntimeEvent::PolkadotXcm(
 				pallet_xcm::Event::FeesPaid { .. }
 			) => {},
@@ -487,6 +488,11 @@ pub fn para_to_para_through_hop_receiver_assertions<Hop: Clone>(t: Test<PenpalA
 fn relay_to_para_reserve_transfer_assets(t: RelayToParaTest) -> DispatchResult {
+	let Junction::Parachain(para_id) = *t.args.dest.chain_location().last().unwrap() else {
+		unimplemented!("Destination is not a parachain?")
+	};
+
+	Dmp::make_parachain_reachable(para_id);
 	<Rococo as RococoPallet>::XcmPallet::limited_reserve_transfer_assets(
 		t.signed_origin,
 		bx!(t.args.dest.into()),
@@ -546,6 +552,13 @@ fn para_to_system_para_reserve_transfer_assets(t: ParaToSystemParaTest) -> DispatchResult {
 fn para_to_para_through_relay_limited_reserve_transfer_assets(
 	t: ParaToParaThroughRelayTest,
 ) -> DispatchResult {
+	let Junction::Parachain(para_id) = *t.args.dest.chain_location().last().unwrap() else {
+		unimplemented!("Destination is not a parachain?")
+	};
+
+	Rococo::ext_wrapper(|| {
+		Dmp::make_parachain_reachable(para_id);
+	});
 	<PenpalA as PenpalAPallet>::PolkadotXcm::limited_reserve_transfer_assets(
 		t.signed_origin,
 		bx!(t.args.dest.into()),
diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/treasury.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/treasury.rs
index 69111d38bcac..8648c8ce9311 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/treasury.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/treasury.rs
@@ -29,6 +29,7 @@ use frame_support::{
 use parachains_common::AccountId;
 use polkadot_runtime_common::impls::VersionedLocatableAsset;
 use rococo_runtime_constants::currency::GRAND;
+use rococo_system_emulated_network::rococo_emulated_chain::rococo_runtime::Dmp;
 use xcm_executor::traits::ConvertLocation;
 
 // Fund Treasury account on Asset Hub from Treasury account on Relay Chain with ROCs.
@@ -64,6 +65,7 @@ fn spend_roc_on_asset_hub() {
 			treasury_balance * 2,
 		));
 
+		Dmp::make_parachain_reachable(1000);
 		let native_asset = Location::here();
 		let asset_hub_location: Location = [Parachain(1000)].into();
 		let treasury_location: Location = (Parent, PalletInstance(18)).into();
@@ -199,6 +201,8 @@ fn create_and_claim_treasury_spend_in_usdt() {
 		// create a conversion rate from `asset_kind` to the native currency.
 		assert_ok!(AssetRate::create(root.clone(), Box::new(asset_kind.clone()), 2.into()));
 
+		Dmp::make_parachain_reachable(1000);
+
 		// create and approve a treasury spend.
assert_ok!(Treasury::spend( root, diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs index 3cca99fbfe5c..36630e2d2221 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs @@ -106,6 +106,7 @@ mod imports { pub type ParaToParaThroughRelayTest = Test; pub type ParaToParaThroughAHTest = Test; pub type RelayToParaThroughAHTest = Test; + pub type PenpalToRelayThroughAHTest = Test; } #[cfg(test)] diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/hybrid_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/hybrid_transfers.rs index a0fc82fba6ef..91ebdda16828 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/hybrid_transfers.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/hybrid_transfers.rs @@ -13,6 +13,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +use westend_system_emulated_network::westend_emulated_chain::westend_runtime::Dmp; + use super::reserve_transfer::*; use crate::{ imports::*, @@ -658,13 +660,13 @@ fn bidirectional_teleport_foreign_asset_between_para_and_asset_hub_using_explici } // =============================================================== -// ===== Transfer - Native Asset - Relay->AssetHub->Parachain ==== +// ====== Transfer - Native Asset - Relay->AssetHub->Penpal ====== // =============================================================== -/// Transfers of native asset Relay to Parachain (using AssetHub reserve). Parachains want to avoid +/// Transfers of native asset Relay to Penpal (using AssetHub reserve). Parachains want to avoid /// managing SAs on all system chains, thus want all their DOT-in-reserve to be held in their /// Sovereign Account on Asset Hub. #[test] -fn transfer_native_asset_from_relay_to_para_through_asset_hub() { +fn transfer_native_asset_from_relay_to_penpal_through_asset_hub() { // Init values for Relay let destination = Westend::child_location_of(PenpalA::para_id()); let sender = WestendSender::get(); @@ -778,6 +780,8 @@ fn transfer_native_asset_from_relay_to_para_through_asset_hub() { xcm: xcm_on_final_dest, }]); + Dmp::make_parachain_reachable(AssetHubWestend::para_id()); + // First leg is a teleport, from there a local-reserve-transfer to final dest ::XcmPallet::transfer_assets_using_type_and_then( t.signed_origin, @@ -820,6 +824,137 @@ fn transfer_native_asset_from_relay_to_para_through_asset_hub() { assert!(receiver_assets_after < receiver_assets_before + amount_to_send); } +// =============================================================== +// ===== Transfer - Native Asset - Penpal->AssetHub->Relay ======= +// =============================================================== +/// Transfers of native asset Penpal to Relay (using AssetHub reserve). Parachains want to avoid +/// managing SAs on all system chains, thus want all their DOT-in-reserve to be held in their +/// Sovereign Account on Asset Hub. 
+#[test] +fn transfer_native_asset_from_penpal_to_relay_through_asset_hub() { + // Init values for Penpal + let destination = RelayLocation::get(); + let sender = PenpalASender::get(); + let amount_to_send: Balance = WESTEND_ED * 100; + + // Init values for Westend + let relay_native_asset_location = RelayLocation::get(); + let receiver = WestendReceiver::get(); + + // Init Test + let test_args = TestContext { + sender: sender.clone(), + receiver: receiver.clone(), + args: TestArgs::new_para( + destination.clone(), + receiver.clone(), + amount_to_send, + (Parent, amount_to_send).into(), + None, + 0, + ), + }; + let mut test = PenpalToRelayThroughAHTest::new(test_args); + + let sov_penpal_on_ah = AssetHubWestend::sovereign_account_id_of( + AssetHubWestend::sibling_location_of(PenpalA::para_id()), + ); + // fund Penpal's sender account + PenpalA::mint_foreign_asset( + <PenpalA as Chain>::RuntimeOrigin::signed(PenpalAssetOwner::get()), + relay_native_asset_location.clone(), + sender.clone(), + amount_to_send * 2, + ); + // fund Penpal's SA on AssetHub with the assets held in reserve + AssetHubWestend::fund_accounts(vec![(sov_penpal_on_ah.clone().into(), amount_to_send * 2)]); + + // prefund Relay checking account so we accept teleport "back" from AssetHub + let check_account = + Westend::execute_with(|| <Westend as WestendPallet>::XcmPallet::check_account()); + Westend::fund_accounts(vec![(check_account, amount_to_send)]); + + // Query initial balances + let sender_balance_before = PenpalA::execute_with(|| { + type ForeignAssets = <PenpalA as PenpalAPallet>::ForeignAssets; + <ForeignAssets as Inspect<_>>::balance(relay_native_asset_location.clone(), &sender) + }); + let sov_penpal_on_ah_before = AssetHubWestend::execute_with(|| { + <AssetHubWestend as AssetHubWestendPallet>::Balances::free_balance(sov_penpal_on_ah.clone()) + }); + let receiver_balance_before = Westend::execute_with(|| { + <Westend as WestendPallet>::Balances::free_balance(receiver.clone()) + }); + + fn transfer_assets_dispatchable(t: PenpalToRelayThroughAHTest) -> DispatchResult { + let fee_idx = t.args.fee_asset_item as usize; + let fee: Asset = t.args.assets.inner().get(fee_idx).cloned().unwrap(); + let asset_hub_location = PenpalA::sibling_location_of(AssetHubWestend::para_id()); + let context = PenpalUniversalLocation::get(); + + // reanchor fees to the view of destination (Westend Relay) + let mut remote_fees = fee.clone().reanchored(&t.args.dest, &context).unwrap(); + if let Fungible(ref mut amount) = remote_fees.fun { + // we already spent some fees along the way, just use half of what we started with + *amount = *amount / 2; + } + let xcm_on_final_dest = Xcm::<()>(vec![ + BuyExecution { fees: remote_fees, weight_limit: t.args.weight_limit.clone() }, + DepositAsset { + assets: Wild(AllCounted(t.args.assets.len() as u32)), + beneficiary: t.args.beneficiary, + }, + ]); + + // reanchor final dest (Westend Relay) to the view of hop (Asset Hub) + let mut dest = t.args.dest.clone(); + dest.reanchor(&asset_hub_location, &context).unwrap(); + // on Asset Hub + let xcm_on_hop = Xcm::<()>(vec![InitiateTeleport { + assets: Wild(AllCounted(t.args.assets.len() as u32)), + dest, + xcm: xcm_on_final_dest, + }]); + + // First leg is a reserve-withdraw, from there a teleport to final dest + <PenpalA as PenpalAPallet>::PolkadotXcm::transfer_assets_using_type_and_then( + t.signed_origin, + bx!(asset_hub_location.into()), + bx!(t.args.assets.into()), + bx!(TransferType::DestinationReserve), + bx!(fee.id.into()), + bx!(TransferType::DestinationReserve), + bx!(VersionedXcm::from(xcm_on_hop)), + t.args.weight_limit, + ) + } + test.set_dispatchable::<PenpalA>(transfer_assets_dispatchable); + test.assert(); + + // Query final balances + let
sender_balance_after = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(relay_native_asset_location.clone(), &sender) + }); + let sov_penpal_on_ah_after = AssetHubWestend::execute_with(|| { + ::Balances::free_balance(sov_penpal_on_ah.clone()) + }); + let receiver_balance_after = Westend::execute_with(|| { + ::Balances::free_balance(receiver.clone()) + }); + + // Sender's asset balance is reduced by amount sent plus delivery fees + assert!(sender_balance_after < sender_balance_before - amount_to_send); + // SA on AH balance is decreased by `amount_to_send` + assert_eq!(sov_penpal_on_ah_after, sov_penpal_on_ah_before - amount_to_send); + // Receiver's balance is increased + assert!(receiver_balance_after > receiver_balance_before); + // Receiver's balance increased by `amount_to_send - delivery_fees - bought_execution`; + // `delivery_fees` might be paid from transfer or JIT, also `bought_execution` is unknown but + // should be non-zero + assert!(receiver_balance_after < receiver_balance_before + amount_to_send); +} + // ============================================================================================== // ==== Bidirectional Transfer - Native + Teleportable Foreign Assets - Parachain<->AssetHub ==== // ============================================================================================== @@ -839,7 +974,7 @@ fn bidirectional_transfer_multiple_assets_between_penpal_and_asset_hub() { // xcm to be executed at dest let xcm_on_dest = Xcm(vec![ // since this is the last hop, we don't need to further use any assets previously - // reserved for fees (there are no further hops to cover transport fees for); we + // reserved for fees (there are no further hops to cover delivery fees for); we // RefundSurplus to get back any unspent fees RefundSurplus, DepositAsset { assets: Wild(All), beneficiary: t.args.beneficiary }, @@ -875,7 +1010,7 @@ fn bidirectional_transfer_multiple_assets_between_penpal_and_asset_hub() { // xcm to be executed at dest let xcm_on_dest = Xcm(vec![ // since this is the last hop, we don't need to further use any assets previously - // reserved for fees (there are no further hops to cover transport fees for); we + // reserved for fees (there are no further hops to cover delivery fees for); we // RefundSurplus to get back any unspent fees RefundSurplus, DepositAsset { assets: Wild(All), beneficiary: t.args.beneficiary }, diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs index 558eab13e5c7..dc36fed42932 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs @@ -15,6 +15,7 @@ use crate::{create_pool_with_wnd_on, foreign_balance_on, imports::*}; use sp_core::{crypto::get_public_from_string_or_panic, sr25519}; +use westend_system_emulated_network::westend_emulated_chain::westend_runtime::Dmp; fn relay_to_para_sender_assertions(t: RelayToParaTest) { type RuntimeEvent = ::RuntimeEvent; @@ -115,7 +116,7 @@ pub fn system_para_to_para_sender_assertions(t: SystemParaToParaTest) { assert_expected_events!( AssetHubWestend, vec![ - // Transport fees are paid + // Delivery fees are paid RuntimeEvent::PolkadotXcm(pallet_xcm::Event::FeesPaid { .. 
}) => {}, ] ); @@ -274,7 +275,7 @@ fn system_para_to_para_assets_sender_assertions(t: SystemParaToParaTest) { t.args.dest.clone() ), }, - // Transport fees are paid + // Delivery fees are paid RuntimeEvent::PolkadotXcm( pallet_xcm::Event::FeesPaid { .. } ) => {}, @@ -305,7 +306,7 @@ fn para_to_system_para_assets_sender_assertions(t: ParaToSystemParaTest) { owner: *owner == t.sender.account_id, balance: *balance == t.args.amount, }, - // Transport fees are paid + // Delivery fees are paid RuntimeEvent::PolkadotXcm( pallet_xcm::Event::FeesPaid { .. } ) => {}, @@ -487,6 +488,11 @@ pub fn para_to_para_through_hop_receiver_assertions(t: Test DispatchResult { + let Junction::Parachain(para_id) = *t.args.dest.chain_location().last().unwrap() else { + unimplemented!("Destination is not a parachain?") + }; + + Dmp::make_parachain_reachable(para_id); ::XcmPallet::limited_reserve_transfer_assets( t.signed_origin, bx!(t.args.dest.into()), @@ -533,6 +539,13 @@ fn para_to_system_para_reserve_transfer_assets(t: ParaToSystemParaTest) -> Dispa fn para_to_para_through_relay_limited_reserve_transfer_assets( t: ParaToParaThroughRelayTest, ) -> DispatchResult { + let Junction::Parachain(para_id) = *t.args.dest.chain_location().last().unwrap() else { + unimplemented!("Destination is not a parachain?") + }; + + Westend::ext_wrapper(|| { + Dmp::make_parachain_reachable(para_id); + }); ::PolkadotXcm::limited_reserve_transfer_assets( t.signed_origin, bx!(t.args.dest.into()), diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/set_asset_claimer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/set_asset_claimer.rs index 544b05360521..bc00106b47c1 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/set_asset_claimer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/set_asset_claimer.rs @@ -44,7 +44,7 @@ fn test_set_asset_claimer_within_a_chain() { type RuntimeCall = ::RuntimeCall; let asset_trap_xcm = Xcm::::builder_unsafe() - .set_asset_claimer(bob_location.clone()) + .set_hints(vec![AssetClaimer { location: bob_location.clone() }]) .withdraw_asset(assets.clone()) .clear_origin() .build(); @@ -116,7 +116,7 @@ fn test_set_asset_claimer_between_the_chains() { let assets: Assets = (Parent, trap_amount).into(); type RuntimeCall = ::RuntimeCall; let trap_xcm = Xcm::::builder_unsafe() - .set_asset_claimer(alice_bh_sibling.clone()) + .set_hints(vec![AssetClaimer { location: alice_bh_sibling.clone() }]) .withdraw_asset(assets.clone()) .clear_origin() .build(); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/transact.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/transact.rs index 3c53cfb261be..7e881a332a53 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/transact.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/transact.rs @@ -43,10 +43,10 @@ fn transfer_and_transact_in_same_xcm( // xcm to be executed at dest let xcm_on_dest = Xcm(vec![ - Transact { origin_kind: OriginKind::Xcm, call }, + Transact { origin_kind: OriginKind::Xcm, call, fallback_max_weight: None }, ExpectTransactStatus(MaybeErrorCode::Success), // since this is the last hop, we don't need to further use any assets previously - // reserved for fees (there are no further hops to cover 
transport fees for); we + // reserved for fees (there are no further hops to cover delivery fees for); we // RefundSurplus to get back any unspent fees RefundSurplus, DepositAsset { assets: Wild(All), beneficiary }, diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/treasury.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/treasury.rs index c303e6411d33..3b53557fc05c 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/treasury.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/treasury.rs @@ -20,6 +20,7 @@ use emulated_integration_tests_common::{ }; use frame_support::traits::fungibles::{Inspect, Mutate}; use polkadot_runtime_common::impls::VersionedLocatableAsset; +use westend_system_emulated_network::westend_emulated_chain::westend_runtime::Dmp; use xcm_executor::traits::ConvertLocation; #[test] @@ -58,6 +59,8 @@ fn create_and_claim_treasury_spend() { // create a conversion rate from `asset_kind` to the native currency. assert_ok!(AssetRate::create(root.clone(), Box::new(asset_kind.clone()), 2.into())); + Dmp::make_parachain_reachable(1000); + // create and approve a treasury spend. assert_ok!(Treasury::spend( root, diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs index 33ab1e70b97b..a2a61660afff 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs @@ -16,7 +16,7 @@ use crate::tests::*; fn send_assets_over_bridge(send_fn: F) { - // fund the AHR's SA on BHR for paying bridge transport fees + // fund the AHR's SA on BHR for paying bridge delivery fees BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id(), 10_000_000_000_000u128); // set XCM versions diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/register_bridged_assets.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/register_bridged_assets.rs index 1ae3a1b15805..70e7a7a3ddd3 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/register_bridged_assets.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/register_bridged_assets.rs @@ -58,7 +58,7 @@ fn register_rococo_asset_on_wah_from_rah() { let destination = asset_hub_westend_location(); - // fund the RAH's SA on RBH for paying bridge transport fees + // fund the RAH's SA on RBH for paying bridge delivery fees BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id(), 10_000_000_000_000u128); // set XCM versions diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs index 931a3128f826..cfcb581238e6 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs @@ -13,6 +13,8 @@ // See the License for the specific language governing permissions 
and // limitations under the License. +use rococo_system_emulated_network::rococo_emulated_chain::rococo_runtime::Dmp; + use crate::tests::*; #[test] @@ -38,6 +40,8 @@ fn send_xcm_from_rococo_relay_to_westend_asset_hub_should_fail_on_not_applicable // Rococo Global Consensus // Send XCM message from Relay Chain to Bridge Hub source Parachain Rococo::execute_with(|| { + Dmp::make_parachain_reachable(BridgeHubRococo::para_id()); + assert_ok!(::XcmPallet::send( sudo_origin, bx!(destination), @@ -65,7 +69,7 @@ fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() { let native_token = Location::parent(); let amount = ASSET_HUB_ROCOCO_ED * 1_000; - // fund the AHR's SA on BHR for paying bridge transport fees + // fund the AHR's SA on BHR for paying bridge delivery fees BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id(), 10_000_000_000_000u128); // fund sender AssetHubRococo::fund_accounts(vec![(AssetHubRococoSender::get().into(), amount * 10)]); diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs index d59553574c26..c72d5045ddc0 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs @@ -84,7 +84,11 @@ fn create_agent() { let remote_xcm = VersionedXcm::from(Xcm(vec![ UnpaidExecution { weight_limit: Unlimited, check_origin: None }, DescendOrigin(Parachain(origin_para).into()), - Transact { origin_kind: OriginKind::Xcm, call: create_agent_call.encode().into() }, + Transact { + origin_kind: OriginKind::Xcm, + call: create_agent_call.encode().into(), + fallback_max_weight: None, + }, ])); // Rococo Global Consensus @@ -138,7 +142,11 @@ fn create_channel() { let create_agent_xcm = VersionedXcm::from(Xcm(vec![ UnpaidExecution { weight_limit: Unlimited, check_origin: None }, DescendOrigin(Parachain(origin_para).into()), - Transact { origin_kind: OriginKind::Xcm, call: create_agent_call.encode().into() }, + Transact { + origin_kind: OriginKind::Xcm, + call: create_agent_call.encode().into(), + fallback_max_weight: None, + }, ])); let create_channel_call = @@ -147,7 +155,11 @@ fn create_channel() { let create_channel_xcm = VersionedXcm::from(Xcm(vec![ UnpaidExecution { weight_limit: Unlimited, check_origin: None }, DescendOrigin(Parachain(origin_para).into()), - Transact { origin_kind: OriginKind::Xcm, call: create_channel_call.encode().into() }, + Transact { + origin_kind: OriginKind::Xcm, + call: create_channel_call.encode().into(), + fallback_max_weight: None, + }, ])); // Rococo Global Consensus diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs index ab09517339db..cc90c10b54bc 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs @@ -17,7 +17,7 @@ use crate::{create_pool_with_native_on, tests::*}; use xcm::latest::AssetTransferFilter; fn send_assets_over_bridge(send_fn: F) { - // fund the AHW's SA on BHW for paying bridge transport fees + // fund the AHW's SA on BHW for paying bridge 
delivery fees BridgeHubWestend::fund_para_sovereign(AssetHubWestend::para_id(), 10_000_000_000_000u128); // set XCM versions @@ -592,7 +592,7 @@ fn do_send_pens_and_wnds_from_penpal_westend_via_ahw_to_asset_hub_rococo( // XCM to be executed at dest (Rococo Asset Hub) let xcm_on_dest = Xcm(vec![ // since this is the last hop, we don't need to further use any assets previously - // reserved for fees (there are no further hops to cover transport fees for); we + // reserved for fees (there are no further hops to cover delivery fees for); we // RefundSurplus to get back any unspent fees RefundSurplus, // deposit everything to final beneficiary diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/register_bridged_assets.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/register_bridged_assets.rs index 424f1e55956b..952fc35e6703 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/register_bridged_assets.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/register_bridged_assets.rs @@ -82,7 +82,7 @@ fn register_asset_on_rah_from_wah(bridged_asset_at_rah: Location) { let destination = asset_hub_rococo_location(); - // fund the WAH's SA on WBH for paying bridge transport fees + // fund the WAH's SA on WBH for paying bridge delivery fees BridgeHubWestend::fund_para_sovereign(AssetHubWestend::para_id(), 10_000_000_000_000u128); // set XCM versions diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs index 787d7dc842cb..60f8af2242f9 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs @@ -13,6 +13,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
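// Note on a recurring pattern in this patch: relay-side emulated tests must now
// explicitly mark the destination parachain as reachable before sending XCM down
// to it. A minimal sketch of that setup, assuming the `Westend` emulated chain
// and the relay runtime's `Dmp` helper re-exported in the `use` line below
// (the para id is whichever chain a given test targets, here the bridge hub):
//
//     Westend::execute_with(|| {
//         // the tests below call this before `pallet_xcm::send`; otherwise the
//         // relay treats the destination parachain as unreachable
//         Dmp::make_parachain_reachable(BridgeHubWestend::para_id());
//     });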
+use rococo_westend_system_emulated_network::westend_emulated_chain::westend_runtime::Dmp; + use crate::tests::*; #[test] @@ -38,6 +40,8 @@ fn send_xcm_from_westend_relay_to_rococo_asset_hub_should_fail_on_not_applicable // Westend Global Consensus // Send XCM message from Relay Chain to Bridge Hub source Parachain Westend::execute_with(|| { + Dmp::make_parachain_reachable(BridgeHubWestend::para_id()); + assert_ok!(::XcmPallet::send( sudo_origin, bx!(destination), @@ -65,7 +69,7 @@ fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() { let native_token = Location::parent(); let amount = ASSET_HUB_WESTEND_ED * 1_000; - // fund the AHR's SA on BHR for paying bridge transport fees + // fund the AHR's SA on BHR for paying bridge delivery fees BridgeHubWestend::fund_para_sovereign(AssetHubWestend::para_id(), 10_000_000_000_000u128); // fund sender AssetHubWestend::fund_accounts(vec![(AssetHubWestendSender::get().into(), amount * 10)]); diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/transact.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/transact.rs index db42704dae61..f6a3c53c4bf5 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/transact.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/transact.rs @@ -49,10 +49,10 @@ fn transfer_and_transact_in_same_xcm( // xcm to be executed at dest let xcm_on_dest = Xcm(vec![ - Transact { origin_kind: OriginKind::Xcm, call }, + Transact { origin_kind: OriginKind::Xcm, call, fallback_max_weight: None }, ExpectTransactStatus(MaybeErrorCode::Success), // since this is the last hop, we don't need to further use any assets previously - // reserved for fees (there are no further hops to cover transport fees for); we + // reserved for fees (there are no further hops to cover delivery fees for); we // RefundSurplus to get back any unspent fees RefundSurplus, DepositAsset { assets: Wild(All), beneficiary }, diff --git a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship.rs b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship.rs index 80b82e0c446f..802fed1e681d 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship.rs @@ -41,6 +41,7 @@ fn fellows_whitelist_call() { ) .encode() .into(), + fallback_max_weight: None } ]))), }); diff --git a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship_treasury.rs b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship_treasury.rs index 8418e3da3bba..ed7c9bafc607 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship_treasury.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship_treasury.rs @@ -20,6 +20,7 @@ use frame_support::{ }; use polkadot_runtime_common::impls::VersionedLocatableAsset; use westend_runtime_constants::currency::UNITS; +use westend_system_emulated_network::westend_emulated_chain::westend_runtime::Dmp; use xcm_executor::traits::ConvertLocation; // Fund Fellowship Treasury from Westend 
Treasury and spend from Fellowship Treasury. @@ -57,6 +58,8 @@ fn fellowship_treasury_spend() { treasury_balance * 2, )); + Dmp::make_parachain_reachable(1000); + let native_asset = Location::here(); let asset_hub_location: Location = [Parachain(1000)].into(); let treasury_location: Location = (Parent, PalletInstance(37)).into(); diff --git a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/src/tests/coretime_interface.rs b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/src/tests/coretime_interface.rs index 9915b1753ef6..554025e1ecfe 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/src/tests/coretime_interface.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/src/tests/coretime_interface.rs @@ -17,6 +17,7 @@ use crate::imports::*; use frame_support::traits::OnInitialize; use pallet_broker::{ConfigRecord, Configuration, CoreAssignment, CoreMask, ScheduleItem}; use rococo_runtime_constants::system_parachain::coretime::TIMESLICE_PERIOD; +use rococo_system_emulated_network::rococo_emulated_chain::rococo_runtime::Dmp; use sp_runtime::Perbill; #[test] @@ -34,6 +35,10 @@ fn transact_hardcoded_weights_are_sane() { type CoretimeEvent = ::RuntimeEvent; type RelayEvent = ::RuntimeEvent; + Rococo::execute_with(|| { + Dmp::make_parachain_reachable(CoretimeRococo::para_id()); + }); + // Reserve a workload, configure broker and start sales. CoretimeRococo::execute_with(|| { // Hooks don't run in emulated tests - workaround as we need `on_initialize` to tick things diff --git a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/src/tests/coretime_interface.rs b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/src/tests/coretime_interface.rs index 00530f80b958..900994b1afc1 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/src/tests/coretime_interface.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/src/tests/coretime_interface.rs @@ -18,6 +18,7 @@ use frame_support::traits::OnInitialize; use pallet_broker::{ConfigRecord, Configuration, CoreAssignment, CoreMask, ScheduleItem}; use sp_runtime::Perbill; use westend_runtime_constants::system_parachain::coretime::TIMESLICE_PERIOD; +use westend_system_emulated_network::westend_emulated_chain::westend_runtime::Dmp; #[test] fn transact_hardcoded_weights_are_sane() { @@ -34,6 +35,10 @@ fn transact_hardcoded_weights_are_sane() { type CoretimeEvent = ::RuntimeEvent; type RelayEvent = ::RuntimeEvent; + Westend::execute_with(|| { + Dmp::make_parachain_reachable(CoretimeWestend::para_id()); + }); + // Reserve a workload, configure broker and start sales. 
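// The "hooks don't run in emulated tests" workaround mentioned in these coretime
// tests can be sketched roughly as follows. This is a hypothetical shape only:
// the pallet path and block number are illustrative; only `OnInitialize` from
// the imports above is given by the source.
//
//     CoretimeWestend::execute_with(|| {
//         // drive the broker pallet's `on_initialize` by hand for block `n`,
//         // since emulated chains do not execute hooks automatically
//         <coretime_westend_runtime::Broker as OnInitialize<u32>>::on_initialize(n);
//     });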
CoretimeWestend::execute_with(|| { // Hooks don't run in emulated tests - workaround as we need `on_initialize` to tick things diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml index aa6eebc5458f..53acd038cdf5 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml @@ -21,6 +21,7 @@ sp-runtime = { workspace = true } # Polkadot polkadot-runtime-common = { workspace = true, default-features = true } westend-runtime-constants = { workspace = true, default-features = true } +westend-runtime = { workspace = true } xcm = { workspace = true } xcm-executor = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/governance.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/governance.rs new file mode 100644 index 000000000000..ea438f80552e --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/governance.rs @@ -0,0 +1,550 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::imports::*; +use frame_support::traits::ProcessMessageError; + +use codec::Encode; +use frame_support::sp_runtime::traits::Dispatchable; +use parachains_common::AccountId; +use people_westend_runtime::people::IdentityInfo; +use westend_runtime::{ + governance::pallet_custom_origins::Origin::GeneralAdmin as GeneralAdminOrigin, Dmp, +}; +use westend_system_emulated_network::people_westend_emulated_chain::people_westend_runtime; + +use pallet_identity::Data; + +use emulated_integration_tests_common::accounts::{ALICE, BOB}; + +#[test] +fn relay_commands_add_registrar() { + let (origin_kind, origin) = (OriginKind::Superuser, ::RuntimeOrigin::root()); + + let registrar: AccountId = [1; 32].into(); + Westend::execute_with(|| { + type Runtime = ::Runtime; + type RuntimeCall = ::RuntimeCall; + type RuntimeEvent = ::RuntimeEvent; + type PeopleCall = ::RuntimeCall; + type PeopleRuntime = ::Runtime; + + Dmp::make_parachain_reachable(1004); + + let add_registrar_call = + PeopleCall::Identity(pallet_identity::Call::::add_registrar { + account: registrar.into(), + }); + + let xcm_message = RuntimeCall::XcmPallet(pallet_xcm::Call::::send { + dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), + message: bx!(VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { + origin_kind, + call: add_registrar_call.encode().into(), + fallback_max_weight: None + } + ]))), + }); + + assert_ok!(xcm_message.dispatch(origin)); + + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. 
}) => {}, + ] + ); + }); + + PeopleWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + PeopleWestend, + vec![ + RuntimeEvent::Identity(pallet_identity::Event::RegistrarAdded { .. }) => {}, + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true, .. }) => {}, + ] + ); + }); +} + +#[test] +fn relay_commands_add_registrar_wrong_origin() { + let people_westend_alice = PeopleWestend::account_id_of(ALICE); + + let origins = vec![ + ( + OriginKind::SovereignAccount, + ::RuntimeOrigin::signed(people_westend_alice), + ), + (OriginKind::Xcm, GeneralAdminOrigin.into()), + ]; + + let mut signed_origin = true; + + for (origin_kind, origin) in origins { + let registrar: AccountId = [1; 32].into(); + Westend::execute_with(|| { + type Runtime = ::Runtime; + type RuntimeCall = ::RuntimeCall; + type RuntimeEvent = ::RuntimeEvent; + type PeopleCall = ::RuntimeCall; + type PeopleRuntime = ::Runtime; + + Dmp::make_parachain_reachable(1004); + + let add_registrar_call = + PeopleCall::Identity(pallet_identity::Call::::add_registrar { + account: registrar.into(), + }); + + let xcm_message = RuntimeCall::XcmPallet(pallet_xcm::Call::::send { + dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), + message: bx!(VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { + origin_kind, + call: add_registrar_call.encode().into(), + fallback_max_weight: None + } + ]))), + }); + + assert_ok!(xcm_message.dispatch(origin)); + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, + ] + ); + }); + + PeopleWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + if signed_origin { + assert_expected_events!( + PeopleWestend, + vec![ + RuntimeEvent::MessageQueue(pallet_message_queue::Event::ProcessingFailed { error: ProcessMessageError::Unsupported, .. }) => {}, + ] + ); + } else { + assert_expected_events!( + PeopleWestend, + vec![ + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true, .. }) => {}, + ] + ); + } + }); + + signed_origin = false; + } +} + +#[test] +fn relay_commands_kill_identity() { + // To kill an identity, first one must be set + PeopleWestend::execute_with(|| { + type PeopleRuntime = ::Runtime; + type PeopleRuntimeEvent = ::RuntimeEvent; + + let people_westend_alice = + ::RuntimeOrigin::signed(PeopleWestend::account_id_of(ALICE)); + + let identity_info = IdentityInfo { + email: Data::Raw(b"test@test.io".to_vec().try_into().unwrap()), + ..Default::default() + }; + let identity: Box<::IdentityInformation> = + Box::new(identity_info); + + assert_ok!(::Identity::set_identity( + people_westend_alice, + identity + )); + + assert_expected_events!( + PeopleWestend, + vec![ + PeopleRuntimeEvent::Identity(pallet_identity::Event::IdentitySet { .. 
}) => {}, + ] + ); + }); + + let (origin_kind, origin) = (OriginKind::Superuser, ::RuntimeOrigin::root()); + + Westend::execute_with(|| { + type Runtime = ::Runtime; + type RuntimeCall = ::RuntimeCall; + type PeopleCall = ::RuntimeCall; + type RuntimeEvent = ::RuntimeEvent; + type PeopleRuntime = ::Runtime; + + Dmp::make_parachain_reachable(1004); + + let kill_identity_call = + PeopleCall::Identity(pallet_identity::Call::::kill_identity { + target: people_westend_runtime::MultiAddress::Id(PeopleWestend::account_id_of( + ALICE, + )), + }); + + let xcm_message = RuntimeCall::XcmPallet(pallet_xcm::Call::::send { + dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), + message: bx!(VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { + origin_kind, + call: kill_identity_call.encode().into(), + fallback_max_weight: None + } + ]))), + }); + + assert_ok!(xcm_message.dispatch(origin)); + + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, + ] + ); + }); + + PeopleWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + PeopleWestend, + vec![ + RuntimeEvent::Identity(pallet_identity::Event::IdentityKilled { .. }) => {}, + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true, .. }) => {}, + ] + ); + }); +} + +#[test] +fn relay_commands_kill_identity_wrong_origin() { + let people_westend_alice = PeopleWestend::account_id_of(BOB); + + let origins = vec![ + ( + OriginKind::SovereignAccount, + ::RuntimeOrigin::signed(people_westend_alice), + ), + (OriginKind::Xcm, GeneralAdminOrigin.into()), + ]; + + for (origin_kind, origin) in origins { + Westend::execute_with(|| { + type Runtime = ::Runtime; + type RuntimeCall = ::RuntimeCall; + type PeopleCall = ::RuntimeCall; + type RuntimeEvent = ::RuntimeEvent; + type PeopleRuntime = ::Runtime; + + Dmp::make_parachain_reachable(1004); + + let kill_identity_call = + PeopleCall::Identity(pallet_identity::Call::::kill_identity { + target: people_westend_runtime::MultiAddress::Id(PeopleWestend::account_id_of( + ALICE, + )), + }); + + let xcm_message = RuntimeCall::XcmPallet(pallet_xcm::Call::::send { + dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), + message: bx!(VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { + origin_kind, + call: kill_identity_call.encode().into(), + fallback_max_weight: None + } + ]))), + }); + + assert_ok!(xcm_message.dispatch(origin)); + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, + ] + ); + }); + + PeopleWestend::execute_with(|| { + assert_expected_events!(PeopleWestend, vec![]); + }); + } +} + +#[test] +fn relay_commands_add_remove_username_authority() { + let people_westend_alice = PeopleWestend::account_id_of(ALICE); + let people_westend_bob = PeopleWestend::account_id_of(BOB); + + let (origin_kind, origin, usr) = + (OriginKind::Superuser, ::RuntimeOrigin::root(), "rootusername"); + + // First, add a username authority. 
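// The dispatches in this test all share one shape: a relay-origin
// `pallet_xcm::send` of an `UnpaidExecution` + `Transact` pair towards the
// People chain (para id 1004 in this file). A hedged sketch of that shape, with
// `encoded_call` standing in for whichever `pallet_identity` call is encoded:
//
//     Dmp::make_parachain_reachable(1004);
//     let message = VersionedXcm::from(Xcm(vec![
//         UnpaidExecution { weight_limit: Unlimited, check_origin: None },
//         Transact {
//             origin_kind,
//             call: encoded_call, // hypothetical pre-encoded DoubleEncoded call
//             // only consulted if the message is downgraded to an older XCM
//             // version that still carries an explicit weight limit
//             fallback_max_weight: None,
//         },
//     ]));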
+ Westend::execute_with(|| { + type Runtime = <Westend as Chain>::Runtime; + type RuntimeCall = <Westend as Chain>::RuntimeCall; + type RuntimeEvent = <Westend as Chain>::RuntimeEvent; + type PeopleCall = <PeopleWestend as Chain>::RuntimeCall; + type PeopleRuntime = <PeopleWestend as Chain>::Runtime; + + Dmp::make_parachain_reachable(1004); + + let add_username_authority = + PeopleCall::Identity(pallet_identity::Call::<PeopleRuntime>::add_username_authority { + authority: people_westend_runtime::MultiAddress::Id(people_westend_alice.clone()), + suffix: b"suffix1".into(), + allocation: 10, + }); + + let add_authority_xcm_msg = RuntimeCall::XcmPallet(pallet_xcm::Call::<Runtime>::send { + dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), + message: bx!(VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { + origin_kind, + call: add_username_authority.encode().into(), + fallback_max_weight: None + } + ]))), + }); + + assert_ok!(add_authority_xcm_msg.dispatch(origin.clone())); + + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, + ] + ); + }); + + // Check events system-parachain-side + PeopleWestend::execute_with(|| { + type RuntimeEvent = <PeopleWestend as Chain>::RuntimeEvent; + + assert_expected_events!( + PeopleWestend, + vec![ + RuntimeEvent::Identity(pallet_identity::Event::AuthorityAdded { .. }) => {}, + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true, .. }) => {}, + ] + ); + }); + + // Now, use the previously added username authority to concede a username to an account. + PeopleWestend::execute_with(|| { + type PeopleRuntimeEvent = <PeopleWestend as Chain>::RuntimeEvent; + let full_username = [usr.to_owned(), ".suffix1".to_owned()].concat().into_bytes(); + + assert_ok!(<PeopleWestend as PeopleWestendPallet>::Identity::set_username_for( + <PeopleWestend as Chain>::RuntimeOrigin::signed(people_westend_alice.clone()), + people_westend_runtime::MultiAddress::Id(people_westend_bob.clone()), + full_username, + None, + true + )); + + assert_expected_events!( + PeopleWestend, + vec![ + PeopleRuntimeEvent::Identity(pallet_identity::Event::UsernameQueued { .. }) => {}, + ] + ); + }); + + // Accept the given username + PeopleWestend::execute_with(|| { + type PeopleRuntimeEvent = <PeopleWestend as Chain>::RuntimeEvent; + let full_username = [usr.to_owned(), ".suffix1".to_owned()].concat().into_bytes(); + + assert_ok!(<PeopleWestend as PeopleWestendPallet>::Identity::accept_username( + <PeopleWestend as Chain>::RuntimeOrigin::signed(people_westend_bob.clone()), + full_username.try_into().unwrap(), + )); + + assert_expected_events!( + PeopleWestend, + vec![ + PeopleRuntimeEvent::Identity(pallet_identity::Event::UsernameSet { .. }) => {}, + ] + ); + }); + + // Now, remove the username authority with another privileged XCM call.
+ Westend::execute_with(|| { + type Runtime = ::Runtime; + type RuntimeCall = ::RuntimeCall; + type RuntimeEvent = ::RuntimeEvent; + type PeopleCall = ::RuntimeCall; + type PeopleRuntime = ::Runtime; + + Dmp::make_parachain_reachable(1004); + + let remove_username_authority = PeopleCall::Identity(pallet_identity::Call::< + PeopleRuntime, + >::remove_username_authority { + authority: people_westend_runtime::MultiAddress::Id(people_westend_alice.clone()), + suffix: b"suffix1".into(), + }); + + let remove_authority_xcm_msg = RuntimeCall::XcmPallet(pallet_xcm::Call::::send { + dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), + message: bx!(VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { + origin_kind, + call: remove_username_authority.encode().into(), + fallback_max_weight: None + } + ]))), + }); + + assert_ok!(remove_authority_xcm_msg.dispatch(origin)); + + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, + ] + ); + }); + + // Final event check. + PeopleWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + PeopleWestend, + vec![ + RuntimeEvent::Identity(pallet_identity::Event::AuthorityRemoved { .. }) => {}, + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true, .. }) => {}, + ] + ); + }); +} + +#[test] +fn relay_commands_add_remove_username_authority_wrong_origin() { + let people_westend_alice = PeopleWestend::account_id_of(ALICE); + + let origins = vec![ + ( + OriginKind::SovereignAccount, + ::RuntimeOrigin::signed(people_westend_alice.clone()), + ), + (OriginKind::Xcm, GeneralAdminOrigin.into()), + ]; + + for (origin_kind, origin) in origins { + Westend::execute_with(|| { + type Runtime = ::Runtime; + type RuntimeCall = ::RuntimeCall; + type RuntimeEvent = ::RuntimeEvent; + type PeopleCall = ::RuntimeCall; + type PeopleRuntime = ::Runtime; + + Dmp::make_parachain_reachable(1004); + + let add_username_authority = PeopleCall::Identity(pallet_identity::Call::< + PeopleRuntime, + >::add_username_authority { + authority: people_westend_runtime::MultiAddress::Id(people_westend_alice.clone()), + suffix: b"suffix1".into(), + allocation: 10, + }); + + let add_authority_xcm_msg = RuntimeCall::XcmPallet(pallet_xcm::Call::::send { + dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), + message: bx!(VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { + origin_kind, + call: add_username_authority.encode().into(), + fallback_max_weight: None + } + ]))), + }); + + assert_ok!(add_authority_xcm_msg.dispatch(origin.clone())); + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. 
}) => {}, + ] + ); + }); + + // Check events system-parachain-side + PeopleWestend::execute_with(|| { + assert_expected_events!(PeopleWestend, vec![]); + }); + + Westend::execute_with(|| { + type Runtime = ::Runtime; + type RuntimeCall = ::RuntimeCall; + type RuntimeEvent = ::RuntimeEvent; + type PeopleCall = ::RuntimeCall; + type PeopleRuntime = ::Runtime; + + let remove_username_authority = PeopleCall::Identity(pallet_identity::Call::< + PeopleRuntime, + >::remove_username_authority { + authority: people_westend_runtime::MultiAddress::Id(people_westend_alice.clone()), + suffix: b"suffix1".into(), + }); + + Dmp::make_parachain_reachable(1004); + + let remove_authority_xcm_msg = + RuntimeCall::XcmPallet(pallet_xcm::Call::::send { + dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), + message: bx!(VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { + origin_kind: OriginKind::SovereignAccount, + call: remove_username_authority.encode().into(), + fallback_max_weight: None, + } + ]))), + }); + + assert_ok!(remove_authority_xcm_msg.dispatch(origin)); + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, + ] + ); + }); + + PeopleWestend::execute_with(|| { + assert_expected_events!(PeopleWestend, vec![]); + }); + } +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/mod.rs index 08749b295dc2..b9ad9e3db467 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/mod.rs @@ -14,4 +14,5 @@ // limitations under the License. mod claim_assets; +mod governance; mod teleport; diff --git a/cumulus/parachains/pallets/collective-content/Cargo.toml b/cumulus/parachains/pallets/collective-content/Cargo.toml index c52021f67e36..09301bd738f3 100644 --- a/cumulus/parachains/pallets/collective-content/Cargo.toml +++ b/cumulus/parachains/pallets/collective-content/Cargo.toml @@ -5,6 +5,8 @@ authors = ["Parity Technologies "] edition.workspace = true description = "Managed content" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/pallets/parachain-info/Cargo.toml b/cumulus/parachains/pallets/parachain-info/Cargo.toml index e0bed23c4f8c..604441c65f29 100644 --- a/cumulus/parachains/pallets/parachain-info/Cargo.toml +++ b/cumulus/parachains/pallets/parachain-info/Cargo.toml @@ -5,6 +5,8 @@ name = "staging-parachain-info" version = "0.7.0" license = "Apache-2.0" description = "Pallet to store the parachain ID" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/pallets/ping/Cargo.toml b/cumulus/parachains/pallets/ping/Cargo.toml index 51fc384a4f14..ceb38f39fd80 100644 --- a/cumulus/parachains/pallets/ping/Cargo.toml +++ b/cumulus/parachains/pallets/ping/Cargo.toml @@ -5,6 +5,8 @@ name = "cumulus-ping" version = "0.7.0" license = "Apache-2.0" description = "Ping Pallet for Cumulus XCM/UMP testing." 
+homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/pallets/ping/src/lib.rs b/cumulus/parachains/pallets/ping/src/lib.rs index 2cf32c891fc0..b6423a81db3c 100644 --- a/cumulus/parachains/pallets/ping/src/lib.rs +++ b/cumulus/parachains/pallets/ping/src/lib.rs @@ -114,6 +114,7 @@ pub mod pallet { }) .encode() .into(), + fallback_max_weight: None, }]), ) { Ok((hash, cost)) => { @@ -214,6 +215,7 @@ pub mod pallet { }) .encode() .into(), + fallback_max_weight: None, }]), ) { Ok((hash, cost)) => diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml index 42adaba7a27c..81ebc7e09494 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Rococo variant of Asset Hub parachain runtime" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -99,6 +101,7 @@ snowbridge-router-primitives = { workspace = true } [dev-dependencies] asset-test-utils = { workspace = true, default-features = true } +parachains-runtimes-test-utils = { workspace = true, default-features = true } [build-dependencies] substrate-wasm-builder = { optional = true, workspace = true, default-features = true } @@ -143,6 +146,7 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ "cumulus-pallet-aura-ext/try-runtime", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 2f9d83bd9d0b..7e1fb247ad3c 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -62,7 +62,8 @@ use frame_support::{ ord_parameter_types, parameter_types, traits::{ fungible, fungibles, tokens::imbalance::ResolveAssetTo, AsEnsureOriginWithArg, ConstBool, - ConstU128, ConstU32, ConstU64, ConstU8, EitherOfDiverse, InstanceFilter, TransformOrigin, + ConstU128, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Equals, InstanceFilter, + TransformOrigin, }, weights::{ConstantMultiplier, Weight, WeightToFee as _}, BoundedVec, PalletId, @@ -467,6 +468,7 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = MaxSignatories; type WeightInfo = weights::pallet_multisig::WeightInfo; + type BlockNumberProvider = frame_system::Pallet; } impl pallet_utility::Config for Runtime { @@ -652,6 +654,7 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; type AnnouncementDepositFactor = AnnouncementDepositFactor; + type BlockNumberProvider = frame_system::Pallet; } parameter_types! 
{ @@ -918,6 +921,7 @@ impl pallet_nfts::Config for Runtime { type WeightInfo = weights::pallet_nfts::WeightInfo; #[cfg(feature = "runtime-benchmarks")] type Helper = (); + type BlockNumberProvider = frame_system::Pallet; } /// XCM router instance to BridgeHub with bridging capabilities for `Westend` global @@ -933,6 +937,10 @@ impl pallet_xcm_bridge_hub_router::Config for Runtim type Bridges = xcm_config::bridging::NetworkExportTable; type DestinationVersion = PolkadotXcm; + type BridgeHubOrigin = frame_support::traits::EitherOfDiverse< + EnsureRoot, + EnsureXcm>, + >; type ToBridgeHubSender = XcmpQueue; type LocalXcmChannelManager = cumulus_pallet_xcmp_queue::bridging::InAndOutXcmpChannelStatusProvider; @@ -1412,37 +1420,31 @@ impl_runtime_apis! { // We accept the native token to pay fees. let mut acceptable_assets = vec![AssetId(native_token.clone())]; // We also accept all assets in a pool with the native token. - let assets_in_pool_with_native = assets_common::get_assets_in_pool_with::< - Runtime, - xcm::v5::Location - >(&native_token).map_err(|()| XcmPaymentApiError::VersionedConversionFailed)?.into_iter(); - acceptable_assets.extend(assets_in_pool_with_native); + acceptable_assets.extend( + assets_common::PoolAdapter::::get_assets_in_pool_with(native_token) + .map_err(|()| XcmPaymentApiError::VersionedConversionFailed)? + ); PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { let native_asset = xcm_config::TokenLocation::get(); let fee_in_native = WeightToFee::weight_to_fee(&weight); - match asset.try_as::() { + let latest_asset_id: Result = asset.clone().try_into(); + match latest_asset_id { Ok(asset_id) if asset_id.0 == native_asset => { // for native token Ok(fee_in_native) }, Ok(asset_id) => { - let assets_in_pool_with_this_asset: Vec<_> = assets_common::get_assets_in_pool_with::< - Runtime, - xcm::v5::Location - >(&asset_id.0).map_err(|()| XcmPaymentApiError::VersionedConversionFailed)?; - if assets_in_pool_with_this_asset - .into_iter() - .map(|asset_id| asset_id.0) - .any(|location| location == native_asset) { - pallet_asset_conversion::Pallet::::quote_price_tokens_for_exact_tokens( - asset_id.clone().0, + // Try to get current price of `asset_id` in `native_asset`. + if let Ok(Some(swapped_in_native)) = assets_common::PoolAdapter::::quote_price_tokens_for_exact_tokens( + asset_id.0.clone(), native_asset, fee_in_native, true, // We include the fee. - ).ok_or(XcmPaymentApiError::AssetNotFound) + ) { + Ok(swapped_in_native) } else { log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); Err(XcmPaymentApiError::AssetNotFound) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router.rs index 00ecf239428f..9a75428ada8b 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_bridge_hub_router` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-08-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-12-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-acd6uxux-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -52,14 +52,14 @@ impl pallet_xcm_bridge_hub_router::WeightInfo for Weigh /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) - /// Storage: `ToWestendXcmRouter::DeliveryFeeFactor` (r:1 w:1) - /// Proof: `ToWestendXcmRouter::DeliveryFeeFactor` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `ToWestendXcmRouter::Bridge` (r:1 w:1) + /// Proof: `ToWestendXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) fn on_initialize_when_non_congested() -> Weight { // Proof Size summary in bytes: - // Measured: `153` + // Measured: `154` // Estimated: `5487` - // Minimum execution time: 12_993_000 picoseconds. - Weight::from_parts(13_428_000, 0) + // Minimum execution time: 13_884_000 picoseconds. + Weight::from_parts(14_312_000, 0) .saturating_add(Weight::from_parts(0, 5487)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -72,9 +72,21 @@ impl pallet_xcm_bridge_hub_router::WeightInfo for Weigh // Proof Size summary in bytes: // Measured: `144` // Estimated: `5487` - // Minimum execution time: 6_305_000 picoseconds. - Weight::from_parts(6_536_000, 0) + // Minimum execution time: 6_909_000 picoseconds. + Weight::from_parts(7_115_000, 0) .saturating_add(Weight::from_parts(0, 5487)) .saturating_add(T::DbWeight::get().reads(2)) } + /// Storage: `ToWestendXcmRouter::Bridge` (r:1 w:1) + /// Proof: `ToWestendXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) + fn report_bridge_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `150` + // Estimated: `1502` + // Minimum execution time: 12_394_000 picoseconds. 
+ Weight::from_parts(12_883_000, 0) + .saturating_add(Weight::from_parts(0, 1502)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/mod.rs index 025c39bcee07..ccf473484cad 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/mod.rs @@ -22,6 +22,7 @@ use alloc::vec::Vec; use frame_support::weights::Weight; use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight; use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; +use sp_runtime::BoundedVec; use xcm::{ latest::{prelude::*, AssetTransferFilter}, DoubleEncoded, @@ -84,7 +85,11 @@ impl XcmWeightInfo for AssetHubRococoXcmWeight { fn transfer_reserve_asset(assets: &Assets, _dest: &Location, _xcm: &Xcm<()>) -> Weight { assets.weigh_assets(XcmFungibleWeight::::transfer_reserve_asset()) } - fn transact(_origin_type: &OriginKind, _call: &DoubleEncoded) -> Weight { + fn transact( + _origin_type: &OriginKind, + _fallback_max_weight: &Option, + _call: &DoubleEncoded, + ) -> Weight { XcmGeneric::::transact() } fn hrmp_new_channel_open_request( @@ -172,8 +177,16 @@ impl XcmWeightInfo for AssetHubRococoXcmWeight { fn clear_error() -> Weight { XcmGeneric::::clear_error() } - fn set_asset_claimer(_location: &Location) -> Weight { - XcmGeneric::::set_asset_claimer() + fn set_hints(hints: &BoundedVec) -> Weight { + let mut weight = Weight::zero(); + for hint in hints { + match hint { + AssetClaimer { .. } => { + weight = weight.saturating_add(XcmGeneric::::asset_claimer()); + }, + } + } + weight } fn claim_asset(_assets: &Assets, _ticket: &Location) -> Weight { XcmGeneric::::claim_asset() diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index b69c136b29d9..d48debef94c8 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -87,7 +87,7 @@ impl WeightInfo { // Minimum execution time: 5_803_000 picoseconds. 
Weight::from_parts(5_983_000, 0) } - pub fn set_asset_claimer() -> Weight { + pub fn asset_claimer() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs index 5da8b45417a3..144934ecd4ab 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs @@ -24,10 +24,11 @@ use asset_hub_rococo_runtime::{ ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger, LocationToAccountId, StakingPot, TokenLocation, TrustBackedAssetsPalletLocation, XcmConfig, }, - AllPalletsWithoutSystem, AssetConversion, AssetDeposit, Assets, Balances, CollatorSelection, - ExistentialDeposit, ForeignAssets, ForeignAssetsInstance, MetadataDepositBase, - MetadataDepositPerByte, ParachainSystem, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, - SessionKeys, TrustBackedAssetsInstance, XcmpQueue, + AllPalletsWithoutSystem, AssetConversion, AssetDeposit, Assets, Balances, Block, + CollatorSelection, ExistentialDeposit, ForeignAssets, ForeignAssetsInstance, + MetadataDepositBase, MetadataDepositPerByte, ParachainSystem, Runtime, RuntimeCall, + RuntimeEvent, RuntimeOrigin, SessionKeys, ToWestendXcmRouterInstance, + TrustBackedAssetsInstance, XcmpQueue, }; use asset_test_utils::{ test_cases_over_bridge::TestBridgingConfig, CollatorSessionKey, CollatorSessionKeys, @@ -1242,6 +1243,58 @@ mod asset_hub_rococo_tests { ) } + #[test] + fn report_bridge_status_from_xcm_bridge_router_for_westend_works() { + asset_test_utils::test_cases_over_bridge::report_bridge_status_from_xcm_bridge_router_works::< + Runtime, + AllPalletsWithoutSystem, + XcmConfig, + LocationToAccountId, + ToWestendXcmRouterInstance, + >( + collator_session_keys(), + bridging_to_asset_hub_westend, + || bp_asset_hub_rococo::build_congestion_message(Default::default(), true).into(), + || bp_asset_hub_rococo::build_congestion_message(Default::default(), false).into(), + ) + } + + #[test] + fn test_report_bridge_status_call_compatibility() { + // if this test fails, make sure `bp_asset_hub_rococo` has valid encoding + assert_eq!( + RuntimeCall::ToWestendXcmRouter( + pallet_xcm_bridge_hub_router::Call::report_bridge_status { + bridge_id: Default::default(), + is_congested: true, + } + ) + .encode(), + bp_asset_hub_rococo::Call::ToWestendXcmRouter( + bp_asset_hub_rococo::XcmBridgeHubRouterCall::report_bridge_status { + bridge_id: Default::default(), + is_congested: true, + } + ) + .encode() + ); + } + + #[test] + fn check_sane_weight_report_bridge_status_for_westend() { + use pallet_xcm_bridge_hub_router::WeightInfo; + let actual = >::WeightInfo::report_bridge_status(); + let max_weight = bp_asset_hub_rococo::XcmBridgeHubRouterTransactCallMaxWeight::get(); + assert!( + actual.all_lte(max_weight), + "max_weight: {:?} should be adjusted to actual {:?}", + max_weight, + actual + ); + } + #[test] fn reserve_transfer_native_asset_to_non_teleport_para_works() { asset_test_utils::test_cases::reserve_transfer_native_asset_to_non_teleport_para_works::< @@ -1471,3 +1524,19 @@ fn location_conversion_works() { assert_eq!(got, expected, "{}", tc.description); } } + +#[test] +fn xcm_payment_api_works() { + parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< + Runtime, + RuntimeCall, + RuntimeOrigin, + Block, + >(); + asset_test_utils::test_cases::xcm_payment_api_with_pools_works::< + 
Runtime, + RuntimeCall, + RuntimeOrigin, + Block, + >(); +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml index d5eaa43ab834..7dd2a4ab4b51 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Westend variant of Asset Hub parachain runtime" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -101,6 +103,7 @@ snowbridge-router-primitives = { workspace = true } [dev-dependencies] asset-test-utils = { workspace = true, default-features = true } +parachains-runtimes-test-utils = { workspace = true, default-features = true } [build-dependencies] substrate-wasm-builder = { optional = true, workspace = true, default-features = true } @@ -147,6 +150,7 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ "cumulus-pallet-aura-ext/try-runtime", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 2206aea78ec2..ffd54ce4c8ac 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -46,8 +46,8 @@ use frame_support::{ traits::{ fungible, fungibles, tokens::{imbalance::ResolveAssetTo, nonfungibles_v2::Inspect}, - AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU32, ConstU64, ConstU8, InstanceFilter, - Nothing, TransformOrigin, + AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU32, ConstU64, ConstU8, Equals, + InstanceFilter, Nothing, TransformOrigin, }, weights::{ConstantMultiplier, Weight, WeightToFee as _}, BoundedVec, PalletId, @@ -59,6 +59,7 @@ use frame_system::{ use pallet_asset_conversion_tx_payment::SwapAssetAdapter; use pallet_nfts::{DestroyWitness, PalletFeatures}; use pallet_revive::{evm::runtime::EthExtra, AddressMapper}; +use pallet_xcm::EnsureXcm; use parachains_common::{ impls::DealWithFees, message_queue::*, AccountId, AssetIdForTrustBackedAssets, AuraId, Balance, BlockNumber, CollectionId, Hash, Header, ItemId, Nonce, Signature, AVERAGE_ON_INITIALIZE_RATIO, @@ -124,7 +125,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: alloc::borrow::Cow::Borrowed("westmint"), impl_name: alloc::borrow::Cow::Borrowed("westmint"), authoring_version: 1, - spec_version: 1_016_006, + spec_version: 1_017_002, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 16, @@ -466,6 +467,7 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = MaxSignatories; type WeightInfo = weights::pallet_multisig::WeightInfo; + type BlockNumberProvider = frame_system::Pallet; } impl pallet_utility::Config for Runtime { @@ -651,6 +653,7 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; type AnnouncementDepositFactor = AnnouncementDepositFactor; + type BlockNumberProvider = frame_system::Pallet; } parameter_types! 
{ @@ -912,6 +915,7 @@ impl pallet_nfts::Config for Runtime { type WeightInfo = weights::pallet_nfts::WeightInfo; #[cfg(feature = "runtime-benchmarks")] type Helper = (); + type BlockNumberProvider = frame_system::Pallet; } /// XCM router instance to BridgeHub with bridging capabilities for `Rococo` global @@ -927,6 +931,10 @@ impl pallet_xcm_bridge_hub_router::Config for Runtime type Bridges = xcm_config::bridging::NetworkExportTable; type DestinationVersion = PolkadotXcm; + type BridgeHubOrigin = frame_support::traits::EitherOfDiverse< + EnsureRoot, + EnsureXcm>, + >; type ToBridgeHubSender = XcmpQueue; type LocalXcmChannelManager = cumulus_pallet_xcmp_queue::bridging::InAndOutXcmpChannelStatusProvider; @@ -1525,38 +1533,31 @@ impl_runtime_apis! { // We accept the native token to pay fees. let mut acceptable_assets = vec![AssetId(native_token.clone())]; // We also accept all assets in a pool with the native token. - let assets_in_pool_with_native = assets_common::get_assets_in_pool_with::< - Runtime, - xcm::v5::Location - >(&native_token).map_err(|()| XcmPaymentApiError::VersionedConversionFailed)?.into_iter(); - acceptable_assets.extend(assets_in_pool_with_native); + acceptable_assets.extend( + assets_common::PoolAdapter::::get_assets_in_pool_with(native_token) + .map_err(|()| XcmPaymentApiError::VersionedConversionFailed)? + ); PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { let native_asset = xcm_config::WestendLocation::get(); let fee_in_native = WeightToFee::weight_to_fee(&weight); - match asset.try_as::() { + let latest_asset_id: Result = asset.clone().try_into(); + match latest_asset_id { Ok(asset_id) if asset_id.0 == native_asset => { // for native asset Ok(fee_in_native) }, Ok(asset_id) => { - // We recognize assets in a pool with the native one. - let assets_in_pool_with_this_asset: Vec<_> = assets_common::get_assets_in_pool_with::< - Runtime, - xcm::v5::Location - >(&asset_id.0).map_err(|()| XcmPaymentApiError::VersionedConversionFailed)?; - if assets_in_pool_with_this_asset - .into_iter() - .map(|asset_id| asset_id.0) - .any(|location| location == native_asset) { - pallet_asset_conversion::Pallet::::quote_price_tokens_for_exact_tokens( - asset_id.clone().0, + // Try to get current price of `asset_id` in `native_asset`. + if let Ok(Some(swapped_in_native)) = assets_common::PoolAdapter::::quote_price_tokens_for_exact_tokens( + asset_id.0.clone(), native_asset, fee_in_native, true, // We include the fee. - ).ok_or(XcmPaymentApiError::AssetNotFound) + ) { + Ok(swapped_in_native) } else { log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); Err(XcmPaymentApiError::AssetNotFound) @@ -2085,18 +2086,10 @@ impl_runtime_apis! { let account = ::AddressMapper::to_account_id(&address); System::account_nonce(account) } - fn eth_transact( - from: H160, - dest: Option, - value: U256, - input: Vec, - gas_limit: Option, - storage_deposit_limit: Option, - ) -> pallet_revive::EthContractResult + + fn eth_transact(tx: pallet_revive::evm::GenericTransaction) -> Result, pallet_revive::EthTransactError> { - use pallet_revive::AddressMapper; - let blockweights = ::BlockWeights::get(); - let origin = ::AddressMapper::to_account_id(&from); + let blockweights: BlockWeights = ::BlockWeights::get(); let encoded_size = |pallet_call| { let call = RuntimeCall::Revive(pallet_call); @@ -2105,15 +2098,9 @@ impl_runtime_apis! 
{ }; Revive::bare_eth_transact( - origin, - dest, - value, - input, - gas_limit.unwrap_or(blockweights.max_block), - storage_deposit_limit.unwrap_or(u128::MAX), + tx, + blockweights.max_block, encoded_size, - pallet_revive::DebugInfo::UnsafeDebug, - pallet_revive::CollectEvents::UnsafeCollect, ) } @@ -2131,7 +2118,7 @@ impl_runtime_apis! { dest, value, gas_limit.unwrap_or(blockweights.max_block), - storage_deposit_limit.unwrap_or(u128::MAX), + pallet_revive::DepositLimit::Balance(storage_deposit_limit.unwrap_or(u128::MAX)), input_data, pallet_revive::DebugInfo::UnsafeDebug, pallet_revive::CollectEvents::UnsafeCollect, @@ -2153,7 +2140,7 @@ impl_runtime_apis! { RuntimeOrigin::signed(origin), value, gas_limit.unwrap_or(blockweights.max_block), - storage_deposit_limit.unwrap_or(u128::MAX), + pallet_revive::DepositLimit::Balance(storage_deposit_limit.unwrap_or(u128::MAX)), code, data, salt, diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm_bridge_hub_router.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm_bridge_hub_router.rs index c0898012e9f3..78aa839deacd 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm_bridge_hub_router.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm_bridge_hub_router.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_bridge_hub_router` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-08-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-12-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-acd6uxux-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -52,14 +52,14 @@ impl pallet_xcm_bridge_hub_router::WeightInfo for Weigh /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) - /// Storage: `ToRococoXcmRouter::DeliveryFeeFactor` (r:1 w:1) - /// Proof: `ToRococoXcmRouter::DeliveryFeeFactor` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `ToRococoXcmRouter::Bridge` (r:1 w:1) + /// Proof: `ToRococoXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) fn on_initialize_when_non_congested() -> Weight { // Proof Size summary in bytes: - // Measured: `225` + // Measured: `259` // Estimated: `5487` - // Minimum execution time: 13_483_000 picoseconds. - Weight::from_parts(13_862_000, 0) + // Minimum execution time: 14_643_000 picoseconds. 
+ Weight::from_parts(14_992_000, 0)
 .saturating_add(Weight::from_parts(0, 5487))
 .saturating_add(T::DbWeight::get().reads(3))
 .saturating_add(T::DbWeight::get().writes(1))
@@ -70,11 +70,23 @@ impl pallet_xcm_bridge_hub_router::WeightInfo for Weigh
 /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`)
 fn on_initialize_when_congested() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `111`
+ // Measured: `144`
 // Estimated: `5487`
- // Minimum execution time: 5_078_000 picoseconds.
- Weight::from_parts(5_233_000, 0)
+ // Minimum execution time: 5_367_000 picoseconds.
+ Weight::from_parts(5_604_000, 0)
 .saturating_add(Weight::from_parts(0, 5487))
 .saturating_add(T::DbWeight::get().reads(2))
 }
+ /// Storage: `ToRococoXcmRouter::Bridge` (r:1 w:1)
+ /// Proof: `ToRococoXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`)
+ fn report_bridge_status() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `150`
+ // Estimated: `1502`
+ // Minimum execution time: 12_562_000 picoseconds.
+ Weight::from_parts(12_991_000, 0)
+ .saturating_add(Weight::from_parts(0, 1502))
+ .saturating_add(T::DbWeight::get().reads(1))
+ .saturating_add(T::DbWeight::get().writes(1))
+ }
 }
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs
index 35ff2dc367c0..a0e9705ff01d 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs
@@ -21,6 +21,7 @@ use alloc::vec::Vec;
 use frame_support::weights::Weight;
 use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight;
 use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric;
+use sp_runtime::BoundedVec;
 use xcm::{
 latest::{prelude::*, AssetTransferFilter},
 DoubleEncoded,
@@ -83,7 +84,11 @@ impl<Call> XcmWeightInfo<Call> for AssetHubWestendXcmWeight<Call> {
 fn transfer_reserve_asset(assets: &Assets, _dest: &Location, _xcm: &Xcm<()>) -> Weight {
 assets.weigh_assets(XcmFungibleWeight::<Runtime>::transfer_reserve_asset())
 }
- fn transact(_origin_type: &OriginKind, _call: &DoubleEncoded<Call>) -> Weight {
+ fn transact(
+ _origin_type: &OriginKind,
+ _fallback_max_weight: &Option<Weight>,
+ _call: &DoubleEncoded<Call>,
+ ) -> Weight {
 XcmGeneric::<Runtime>::transact()
 }
 fn hrmp_new_channel_open_request(
@@ -172,8 +177,16 @@ impl<Call> XcmWeightInfo<Call> for AssetHubWestendXcmWeight<Call> {
 fn clear_error() -> Weight {
 XcmGeneric::<Runtime>::clear_error()
 }
- fn set_asset_claimer(_location: &Location) -> Weight {
- XcmGeneric::<Runtime>::set_asset_claimer()
+ fn set_hints(hints: &BoundedVec<Hint, HintNumVariants>) -> Weight {
+ let mut weight = Weight::zero();
+ for hint in hints {
+ match hint {
+ AssetClaimer { .. } => {
+ weight = weight.saturating_add(XcmGeneric::<Runtime>::asset_claimer());
+ },
+ }
+ }
+ weight
 }
 fn claim_asset(_assets: &Assets, _ticket: &Location) -> Weight {
 XcmGeneric::<Runtime>::claim_asset()
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs
index 528694123115..0ec2741c0490 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs
@@ -87,7 +87,7 @@ impl<T: frame_system::Config> WeightInfo<T> {
 // Minimum execution time: 5_580_000 picoseconds.
 Weight::from_parts(5_950_000, 0)
 }
- pub fn set_asset_claimer() -> Weight {
+ pub fn asset_claimer() -> Weight {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs
index 5d0f843554a1..24b6d83ffae4 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs
@@ -24,10 +24,10 @@ use asset_hub_westend_runtime::{
 ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger, LocationToAccountId, StakingPot,
 TrustBackedAssetsPalletLocation, WestendLocation, XcmConfig,
 },
- AllPalletsWithoutSystem, Assets, Balances, ExistentialDeposit, ForeignAssets,
+ AllPalletsWithoutSystem, Assets, Balances, Block, ExistentialDeposit, ForeignAssets,
 ForeignAssetsInstance, MetadataDepositBase, MetadataDepositPerByte, ParachainSystem,
 PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, SessionKeys,
- TrustBackedAssetsInstance, XcmpQueue,
+ ToRococoXcmRouterInstance, TrustBackedAssetsInstance, XcmpQueue,
 };
 pub use asset_hub_westend_runtime::{AssetConversion, AssetDeposit, CollatorSelection, System};
 use asset_test_utils::{
@@ -1250,6 +1250,56 @@ fn receive_reserve_asset_deposited_roc_from_asset_hub_rococo_fees_paid_by_suffic
 )
 }
+
+#[test]
+fn report_bridge_status_from_xcm_bridge_router_for_rococo_works() {
+ asset_test_utils::test_cases_over_bridge::report_bridge_status_from_xcm_bridge_router_works::<
+ Runtime,
+ AllPalletsWithoutSystem,
+ XcmConfig,
+ LocationToAccountId,
+ ToRococoXcmRouterInstance,
+ >(
+ collator_session_keys(),
+ bridging_to_asset_hub_rococo,
+ || bp_asset_hub_westend::build_congestion_message(Default::default(), true).into(),
+ || bp_asset_hub_westend::build_congestion_message(Default::default(), false).into(),
+ )
+}
+
+#[test]
+fn test_report_bridge_status_call_compatibility() {
+ // if this test fails, make sure `bp_asset_hub_westend` has valid encoding
+ assert_eq!(
+ RuntimeCall::ToRococoXcmRouter(pallet_xcm_bridge_hub_router::Call::report_bridge_status {
+ bridge_id: Default::default(),
+ is_congested: true,
+ })
+ .encode(),
+ bp_asset_hub_westend::Call::ToRococoXcmRouter(
+ bp_asset_hub_westend::XcmBridgeHubRouterCall::report_bridge_status {
+ bridge_id: Default::default(),
+ is_congested: true,
+ }
+ )
+ .encode()
+ )
+}
+
+#[test]
+fn check_sane_weight_report_bridge_status() {
+ use pallet_xcm_bridge_hub_router::WeightInfo;
+ let actual = <Runtime as pallet_xcm_bridge_hub_router::Config<ToRococoXcmRouterInstance>>::WeightInfo::report_bridge_status();
+ let max_weight = bp_asset_hub_westend::XcmBridgeHubRouterTransactCallMaxWeight::get();
+ assert!(
+ actual.all_lte(max_weight),
+ "max_weight: {:?} should be adjusted to actual {:?}",
+ max_weight,
+ actual
+ ); +} + #[test] fn change_xcm_bridge_hub_router_byte_fee_by_governance_works() { asset_test_utils::test_cases::change_storage_constant_by_governance_works::< @@ -1446,3 +1496,19 @@ fn location_conversion_works() { assert_eq!(got, expected, "{}", tc.description); } } + +#[test] +fn xcm_payment_api_works() { + parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< + Runtime, + RuntimeCall, + RuntimeOrigin, + Block, + >(); + asset_test_utils::test_cases::xcm_payment_api_with_pools_works::< + Runtime, + RuntimeCall, + RuntimeOrigin, + Block, + >(); +} diff --git a/cumulus/parachains/runtimes/assets/common/Cargo.toml b/cumulus/parachains/runtimes/assets/common/Cargo.toml index fb66f0de2322..552afa4daa68 100644 --- a/cumulus/parachains/runtimes/assets/common/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/common/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Assets common utilities" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -64,4 +66,5 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm/runtime-benchmarks", ] diff --git a/cumulus/parachains/runtimes/assets/common/src/lib.rs b/cumulus/parachains/runtimes/assets/common/src/lib.rs index 1d2d45b42c5d..25c2df6b68d1 100644 --- a/cumulus/parachains/runtimes/assets/common/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/common/src/lib.rs @@ -28,7 +28,7 @@ extern crate alloc; use crate::matching::{LocalLocationPattern, ParentLocation}; use alloc::vec::Vec; use codec::{Decode, EncodeLike}; -use core::cmp::PartialEq; +use core::{cmp::PartialEq, marker::PhantomData}; use frame_support::traits::{Equals, EverythingBut}; use parachains_common::{AssetIdForTrustBackedAssets, CollectionId, ItemId}; use sp_runtime::traits::TryConvertInto; @@ -137,24 +137,62 @@ pub type PoolAssetsConvertedConcreteId = TryConvertInto, >; -/// Returns an iterator of all assets in a pool with `asset`. -/// -/// Should only be used in runtime APIs since it iterates over the whole -/// `pallet_asset_conversion::Pools` map. -/// -/// It takes in any version of an XCM Location but always returns the latest one. -/// This is to allow some margin of migrating the pools when updating the XCM version. -/// -/// An error of type `()` is returned if the version conversion fails for XCM locations. -/// This error should be mapped by the caller to a more descriptive one. -pub fn get_assets_in_pool_with< - Runtime: pallet_asset_conversion::Config, - L: TryInto + Clone + Decode + EncodeLike + PartialEq, ->( - asset: &L, -) -> Result, ()> { - pallet_asset_conversion::Pools::::iter_keys() - .filter_map(|(asset_1, asset_2)| { +/// Adapter implementation for accessing pools (`pallet_asset_conversion`) that uses `AssetKind` as +/// a `xcm::v*` which could be different from the `xcm::latest`. +pub struct PoolAdapter(PhantomData); +impl< + Runtime: pallet_asset_conversion::Config, + L: TryFrom + TryInto + Clone + Decode + EncodeLike + PartialEq, + > PoolAdapter +{ + /// Returns a vector of all assets in a pool with `asset`. + /// + /// Should only be used in runtime APIs since it iterates over the whole + /// `pallet_asset_conversion::Pools` map. + /// + /// It takes in any version of an XCM Location but always returns the latest one. + /// This is to allow some margin of migrating the pools when updating the XCM version. 
+ ///
+ /// An error of type `()` is returned if the version conversion fails for XCM locations.
+ /// This error should be mapped by the caller to a more descriptive one.
+ pub fn get_assets_in_pool_with(asset: Location) -> Result<Vec<AssetId>, ()> {
+ // convert latest to the `L` version.
+ let asset: L = asset.try_into().map_err(|_| ())?;
+ Self::iter_assets_in_pool_with(&asset)
+ .map(|location| {
+ // convert `L` to the latest `AssetId`
+ location.try_into().map_err(|_| ()).map(AssetId)
+ })
+ .collect::<Result<Vec<_>, _>>()
+ }
+
+ /// Provides the current price. A wrapper over
+ /// `pallet_asset_conversion::Pallet::<Runtime>::quote_price_tokens_for_exact_tokens`.
+ ///
+ /// An error of type `()` is returned if the version conversion fails for XCM locations.
+ /// This error should be mapped by the caller to a more descriptive one.
+ pub fn quote_price_tokens_for_exact_tokens(
+ asset_1: Location,
+ asset_2: Location,
+ amount: Runtime::Balance,
+ include_fees: bool,
+ ) -> Result<Option<Runtime::Balance>, ()> {
+ // Convert latest to the `L` version.
+ let asset_1: L = asset_1.try_into().map_err(|_| ())?;
+ let asset_2: L = asset_2.try_into().map_err(|_| ())?;
+
+ // Quote swap price.
+ Ok(pallet_asset_conversion::Pallet::<Runtime>::quote_price_tokens_for_exact_tokens(
+ asset_1,
+ asset_2,
+ amount,
+ include_fees,
+ ))
+ }
+
+ /// Helper function for iterating over all assets in a pool with `asset`.
+ pub fn iter_assets_in_pool_with(asset: &L) -> impl Iterator<Item = L> + '_ {
+ pallet_asset_conversion::Pools::<Runtime>::iter_keys().filter_map(|(asset_1, asset_2)| {
 if asset_1 == *asset {
 Some(asset_2)
 } else if asset_2 == *asset {
@@ -163,8 +201,7 @@ pub fn get_assets_in_pool_with<
 None
 }
 })
- .map(|location| location.try_into().map_err(|_| ()).map(AssetId))
- .collect::<Result<Vec<_>, _>>()
+ }
 }
 
 #[cfg(test)]
diff --git a/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml b/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml
index 529d6460fc4e..393d06f95b15 100644
--- a/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml
+++ b/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml
@@ -5,6 +5,8 @@ authors.workspace = true
 edition.workspace = true
 description = "Test utils for Asset Hub runtimes."
license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -16,6 +18,7 @@ codec = { features = ["derive", "max-encoded-len"], workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } pallet-assets = { workspace = true } +pallet-asset-conversion = { workspace = true } pallet-balances = { workspace = true } pallet-timestamp = { workspace = true } pallet-session = { workspace = true } @@ -36,6 +39,7 @@ xcm = { workspace = true } xcm-builder = { workspace = true } xcm-executor = { workspace = true } pallet-xcm = { workspace = true } +xcm-runtime-apis = { workspace = true } # Bridges pallet-xcm-bridge-hub-router = { workspace = true } @@ -55,6 +59,7 @@ std = [ "cumulus-primitives-core/std", "frame-support/std", "frame-system/std", + "pallet-asset-conversion/std", "pallet-assets/std", "pallet-balances/std", "pallet-collator-selection/std", @@ -69,5 +74,6 @@ std = [ "sp-runtime/std", "xcm-builder/std", "xcm-executor/std", + "xcm-runtime-apis/std", "xcm/std", ] diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs index 8dc720e27753..b1577e0ca7f6 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs @@ -34,11 +34,14 @@ use parachains_runtimes_test_utils::{ CollatorSessionKeys, ExtBuilder, SlotDurations, ValidatorIdOf, XcmReceivedFrom, }; use sp_runtime::{ - traits::{MaybeEquivalence, StaticLookup, Zero}, + traits::{Block as BlockT, MaybeEquivalence, StaticLookup, Zero}, DispatchError, Saturating, }; use xcm::{latest::prelude::*, VersionedAssets}; use xcm_executor::{traits::ConvertLocation, XcmExecutor}; +use xcm_runtime_apis::fees::{ + runtime_decl_for_xcm_payment_api::XcmPaymentApiV1, Error as XcmPaymentApiError, +}; type RuntimeHelper = parachains_runtimes_test_utils::RuntimeHelper; @@ -1202,14 +1205,20 @@ pub fn create_and_manage_foreign_assets_for_local_consensus_parachain_assets_wor let xcm = Xcm(vec![ WithdrawAsset(buy_execution_fee.clone().into()), BuyExecution { fees: buy_execution_fee.clone(), weight_limit: Unlimited }, - Transact { origin_kind: OriginKind::Xcm, call: foreign_asset_create.into() }, + Transact { + origin_kind: OriginKind::Xcm, + call: foreign_asset_create.into(), + fallback_max_weight: None, + }, Transact { origin_kind: OriginKind::SovereignAccount, call: foreign_asset_set_metadata.into(), + fallback_max_weight: None, }, Transact { origin_kind: OriginKind::SovereignAccount, call: foreign_asset_set_team.into(), + fallback_max_weight: None, }, ExpectTransactStatus(MaybeErrorCode::Success), ]); @@ -1315,7 +1324,11 @@ pub fn create_and_manage_foreign_assets_for_local_consensus_parachain_assets_wor let xcm = Xcm(vec![ WithdrawAsset(buy_execution_fee.clone().into()), BuyExecution { fees: buy_execution_fee.clone(), weight_limit: Unlimited }, - Transact { origin_kind: OriginKind::Xcm, call: foreign_asset_create.into() }, + Transact { + origin_kind: OriginKind::Xcm, + call: foreign_asset_create.into(), + fallback_max_weight: None, + }, ExpectTransactStatus(MaybeErrorCode::from(DispatchError::BadOrigin.encode())), ]); @@ -1584,3 +1597,108 @@ pub fn reserve_transfer_native_asset_to_non_teleport_para_works< ); }) } + +pub fn xcm_payment_api_with_pools_works() +where + Runtime: XcmPaymentApiV1 + + frame_system::Config + + pallet_balances::Config + + pallet_session::Config + + pallet_xcm::Config + + parachain_info::Config + + 
pallet_collator_selection::Config + + cumulus_pallet_parachain_system::Config + + cumulus_pallet_xcmp_queue::Config + + pallet_timestamp::Config + + pallet_assets::Config< + pallet_assets::Instance1, + AssetId = u32, + Balance = ::Balance, + > + pallet_asset_conversion::Config< + AssetKind = xcm::v5::Location, + Balance = ::Balance, + >, + ValidatorIdOf: From>, + RuntimeOrigin: OriginTrait::AccountId>, + <::Lookup as StaticLookup>::Source: + From<::AccountId>, + Block: BlockT, +{ + use xcm::prelude::*; + + ExtBuilder::::default().build().execute_with(|| { + let test_account = AccountId::from([0u8; 32]); + let transfer_amount = 100u128; + let xcm_to_weigh = Xcm::::builder_unsafe() + .withdraw_asset((Here, transfer_amount)) + .buy_execution((Here, transfer_amount), Unlimited) + .deposit_asset(AllCounted(1), [1u8; 32]) + .build(); + let versioned_xcm_to_weigh = VersionedXcm::from(xcm_to_weigh.clone().into()); + + let xcm_weight = Runtime::query_xcm_weight(versioned_xcm_to_weigh); + assert!(xcm_weight.is_ok()); + let native_token: Location = Parent.into(); + let native_token_versioned = VersionedAssetId::from(AssetId(native_token.clone())); + let execution_fees = + Runtime::query_weight_to_asset_fee(xcm_weight.unwrap(), native_token_versioned); + assert!(execution_fees.is_ok()); + + // We need some balance to create an asset. + assert_ok!( + pallet_balances::Pallet::::mint_into(&test_account, 3_000_000_000_000,) + ); + + // Now we try to use an asset that's not in a pool. + let asset_id = 1984u32; // USDT. + let asset_not_in_pool: Location = + (PalletInstance(50), GeneralIndex(asset_id.into())).into(); + assert_ok!(pallet_assets::Pallet::::create( + RuntimeOrigin::signed(test_account.clone()), + asset_id.into(), + test_account.clone().into(), + 1000 + )); + let execution_fees = Runtime::query_weight_to_asset_fee( + xcm_weight.unwrap(), + asset_not_in_pool.clone().into(), + ); + assert_eq!(execution_fees, Err(XcmPaymentApiError::AssetNotFound)); + + // We add it to a pool with native. + assert_ok!(pallet_asset_conversion::Pallet::::create_pool( + RuntimeOrigin::signed(test_account.clone()), + native_token.clone().try_into().unwrap(), + asset_not_in_pool.clone().try_into().unwrap() + )); + let execution_fees = Runtime::query_weight_to_asset_fee( + xcm_weight.unwrap(), + asset_not_in_pool.clone().into(), + ); + // Still not enough because it doesn't have any liquidity. + assert_eq!(execution_fees, Err(XcmPaymentApiError::AssetNotFound)); + + // We mint some of the asset... + assert_ok!(pallet_assets::Pallet::::mint( + RuntimeOrigin::signed(test_account.clone()), + asset_id.into(), + test_account.clone().into(), + 3_000_000_000_000, + )); + // ...so we can add liquidity to the pool. + assert_ok!(pallet_asset_conversion::Pallet::::add_liquidity( + RuntimeOrigin::signed(test_account.clone()), + native_token.try_into().unwrap(), + asset_not_in_pool.clone().try_into().unwrap(), + 1_000_000_000_000, + 2_000_000_000_000, + 0, + 0, + test_account + )); + let execution_fees = + Runtime::query_weight_to_asset_fee(xcm_weight.unwrap(), asset_not_in_pool.into()); + // Now it works! 
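+ // (The fee can now be quoted because the asset is in a pool with the native
+ // token and that pool holds liquidity to price the execution fee against.)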
+ assert_ok!(execution_fees); + }); +} diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs index 4f144e24aa30..9b05f2d46dfb 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs @@ -551,10 +551,7 @@ pub fn report_bridge_status_from_xcm_bridge_router_works< Weight::zero(), ); assert_ok!(outcome.ensure_complete()); - assert_eq!( - is_congested, - <>::LocalXcmChannelManager as pallet_xcm_bridge_hub_router::XcmChannelStatusProvider>::is_congested(&local_bridge_hub_location) - ); + assert_eq!(is_congested, pallet_xcm_bridge_hub_router::Pallet::::bridge().is_congested); }; report_bridge_status(true); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml index 4af8a9f43850..ff50223ef575 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Rococo's BridgeHub parachain runtime" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -96,6 +98,7 @@ bp-relayers = { workspace = true } bp-runtime = { workspace = true } bp-rococo = { workspace = true } bp-westend = { workspace = true } +bp-xcm-bridge-hub-router = { workspace = true } pallet-bridge-grandpa = { workspace = true } pallet-bridge-messages = { workspace = true } pallet-bridge-parachains = { workspace = true } @@ -122,6 +125,7 @@ bridge-hub-test-utils = { workspace = true, default-features = true } bridge-runtime-common = { features = ["integrity-test"], workspace = true, default-features = true } pallet-bridge-relayers = { features = ["integrity-test"], workspace = true } snowbridge-runtime-test-common = { workspace = true, default-features = true } +parachains-runtimes-test-utils = { workspace = true, default-features = true } [features] default = ["std"] @@ -140,6 +144,7 @@ std = [ "bp-rococo/std", "bp-runtime/std", "bp-westend/std", + "bp-xcm-bridge-hub-router/std", "bridge-hub-common/std", "bridge-runtime-common/std", "codec/std", @@ -261,6 +266,7 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs index 7e0385692375..1e733503f43b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs @@ -22,14 +22,13 @@ use crate::{ bridge_common_config::RelayersForPermissionlessLanesInstance, weights, xcm_config::UniversalLocation, AccountId, Balance, Balances, BridgeRococoBulletinGrandpa, - BridgeRococoBulletinMessages, PolkadotXcm, Runtime, RuntimeEvent, RuntimeHoldReason, - XcmOverRococoBulletin, XcmRouter, + BridgeRococoBulletinMessages, Runtime, RuntimeEvent, RuntimeHoldReason, XcmOverRococoBulletin, + XcmRouter, }; use bp_messages::{ source_chain::FromBridgedChainMessagesDeliveryProof, - 
target_chain::FromBridgedChainMessagesProof, HashedLaneId, + target_chain::FromBridgedChainMessagesProof, LegacyLaneId, }; -use bridge_hub_common::xcm_version::XcmVersionOfDestAndRemoteBridge; use frame_support::{ parameter_types, @@ -46,6 +45,7 @@ use testnet_parachains_constants::rococo::currency::UNITS as ROC; use xcm::{ latest::prelude::*, prelude::{InteriorLocation, NetworkId}, + AlwaysV5, }; use xcm_builder::{BridgeBlobDispatcher, ParentIsPreset, SiblingParachainConvertsVia}; @@ -120,7 +120,7 @@ impl pallet_bridge_messages::Config for Runt type OutboundPayload = XcmAsPlainPayload; type InboundPayload = XcmAsPlainPayload; - type LaneId = HashedLaneId; + type LaneId = LegacyLaneId; type DeliveryPayments = (); type DeliveryConfirmationPayments = (); @@ -139,8 +139,7 @@ impl pallet_xcm_bridge_hub::Config for Runtime type BridgeMessagesPalletInstance = WithRococoBulletinMessagesInstance; type MessageExportPrice = (); - type DestinationVersion = - XcmVersionOfDestAndRemoteBridge; + type DestinationVersion = AlwaysV5; type ForceOrigin = EnsureRoot; // We don't want to allow creating bridges for this instance. @@ -201,7 +200,6 @@ mod tests { fn ensure_bridge_integrity() { assert_complete_bridge_types!( runtime: Runtime, - with_bridged_chain_grandpa_instance: BridgeGrandpaRococoBulletinInstance, with_bridged_chain_messages_instance: WithRococoBulletinMessagesInstance, this_chain: bp_bridge_hub_rococo::BridgeHubRococo, bridged_chain: bp_polkadot_bulletin::PolkadotBulletin, @@ -254,7 +252,7 @@ where let universal_source = [GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH)), Parachain(sibling_para_id)].into(); let universal_destination = - [GlobalConsensus(RococoBulletinGlobalConsensusNetwork::get()), Parachain(2075)].into(); + [GlobalConsensus(RococoBulletinGlobalConsensusNetwork::get())].into(); let bridge_id = BridgeId::new(&universal_source, &universal_destination); // insert only bridge metadata, because the benchmarks create lanes @@ -280,29 +278,3 @@ where universal_source } - -/// Contains the migration for the PeopleRococo<>RococoBulletin bridge. -pub mod migration { - use super::*; - use frame_support::traits::ConstBool; - - parameter_types! { - pub BulletinRococoLocation: InteriorLocation = [GlobalConsensus(RococoBulletinGlobalConsensusNetwork::get())].into(); - pub RococoPeopleToRococoBulletinMessagesLane: HashedLaneId = pallet_xcm_bridge_hub::Pallet::< Runtime, XcmOverPolkadotBulletinInstance >::bridge_locations( - PeopleRococoLocation::get(), - BulletinRococoLocation::get() - ) - .unwrap() - .calculate_lane_id(xcm::latest::VERSION).expect("Valid locations"); - } - - /// Ensure that the existing lanes for the People<>Bulletin bridge are correctly configured. 
- pub type StaticToDynamicLanes = pallet_xcm_bridge_hub::migration::OpenBridgeForLane< - Runtime, - XcmOverPolkadotBulletinInstance, - RococoPeopleToRococoBulletinMessagesLane, - ConstBool, - PeopleRococoLocation, - BulletinRococoLocation, - >; -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs index 0eab3c74a7e2..a14101eb454b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs @@ -24,14 +24,14 @@ use crate::{ weights, xcm_config::UniversalLocation, AccountId, Balance, Balances, BridgeWestendMessages, PolkadotXcm, Runtime, RuntimeEvent, - RuntimeHoldReason, XcmOverBridgeHubWestend, XcmRouter, + RuntimeHoldReason, XcmOverBridgeHubWestend, XcmRouter, XcmpQueue, }; use bp_messages::{ source_chain::FromBridgedChainMessagesDeliveryProof, target_chain::FromBridgedChainMessagesProof, LegacyLaneId, }; use bridge_hub_common::xcm_version::XcmVersionOfDestAndRemoteBridge; -use pallet_xcm_bridge_hub::XcmAsPlainPayload; +use pallet_xcm_bridge_hub::{BridgeId, XcmAsPlainPayload}; use frame_support::{parameter_types, traits::PalletInfoAccess}; use frame_system::{EnsureNever, EnsureRoot}; @@ -121,6 +121,7 @@ impl pallet_bridge_messages::Config for Ru type DeliveryConfirmationPayments = pallet_bridge_relayers::DeliveryConfirmationPaymentsAdapter< Runtime, WithBridgeHubWestendMessagesInstance, + RelayersForLegacyLaneIdsMessagesInstance, DeliveryRewardInBalance, >; @@ -156,11 +157,46 @@ impl pallet_xcm_bridge_hub::Config for Runtime type AllowWithoutBridgeDeposit = RelayOrOtherSystemParachains; - // TODO:(bridges-v2) - add `LocalXcmChannelManager` impl - https://github.com/paritytech/parity-bridges-common/issues/3047 - type LocalXcmChannelManager = (); + type LocalXcmChannelManager = CongestionManager; type BlobDispatcher = FromWestendMessageBlobDispatcher; } +/// Implementation of `bp_xcm_bridge_hub::LocalXcmChannelManager` for congestion management. +pub struct CongestionManager; +impl pallet_xcm_bridge_hub::LocalXcmChannelManager for CongestionManager { + type Error = SendError; + + fn is_congested(with: &Location) -> bool { + // This is used to check the inbound bridge queue/messages to determine if they can be + // dispatched and sent to the sibling parachain. Therefore, checking outbound `XcmpQueue` + // is sufficient here. + use bp_xcm_bridge_hub_router::XcmChannelStatusProvider; + cumulus_pallet_xcmp_queue::bridging::OutXcmpChannelStatusProvider::::is_congested( + with, + ) + } + + fn suspend_bridge(local_origin: &Location, bridge: BridgeId) -> Result<(), Self::Error> { + // This bridge is intended for AH<>AH communication with a hard-coded/static lane, + // so `local_origin` is expected to represent only the local AH. + send_xcm::( + local_origin.clone(), + bp_asset_hub_rococo::build_congestion_message(bridge.inner(), true).into(), + ) + .map(|_| ()) + } + + fn resume_bridge(local_origin: &Location, bridge: BridgeId) -> Result<(), Self::Error> { + // This bridge is intended for AH<>AH communication with a hard-coded/static lane, + // so `local_origin` is expected to represent only the local AH. 
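+ // As exercised by the `test_report_bridge_status_call_compatibility` tests, this
+ // message encodes a `report_bridge_status { is_congested: false }` call, clearing
+ // the congestion flag on the sibling Asset Hub's router.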
+ send_xcm::( + local_origin.clone(), + bp_asset_hub_rococo::build_congestion_message(bridge.inner(), false).into(), + ) + .map(|_| ()) + } +} + #[cfg(feature = "runtime-benchmarks")] pub(crate) fn open_bridge_for_benchmarks( with: pallet_xcm_bridge_hub::LaneIdOf, @@ -256,7 +292,6 @@ mod tests { fn ensure_bridge_integrity() { assert_complete_bridge_types!( runtime: Runtime, - with_bridged_chain_grandpa_instance: BridgeGrandpaWestendInstance, with_bridged_chain_messages_instance: WithBridgeHubWestendMessagesInstance, this_chain: bp_bridge_hub_rococo::BridgeHubRococo, bridged_chain: bp_bridge_hub_westend::BridgeHubWestend, @@ -266,7 +301,6 @@ mod tests { Runtime, BridgeGrandpaWestendInstance, WithBridgeHubWestendMessagesInstance, - bp_westend::Westend, >(AssertCompleteBridgeConstants { this_chain_constants: AssertChainConstants { block_length: bp_bridge_hub_rococo::BlockLength::get(), diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/genesis_config_presets.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/genesis_config_presets.rs index 98e2450ee832..55fd499c2f54 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/genesis_config_presets.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/genesis_config_presets.rs @@ -61,10 +61,20 @@ fn bridge_hub_rococo_genesis( .collect(), }, polkadot_xcm: PolkadotXcmConfig { safe_xcm_version: Some(SAFE_XCM_VERSION) }, + bridge_polkadot_bulletin_grandpa: BridgePolkadotBulletinGrandpaConfig { + owner: bridges_pallet_owner.clone(), + }, bridge_westend_grandpa: BridgeWestendGrandpaConfig { owner: bridges_pallet_owner.clone() }, bridge_westend_messages: BridgeWestendMessagesConfig { owner: bridges_pallet_owner.clone(), }, + xcm_over_polkadot_bulletin: XcmOverPolkadotBulletinConfig { + opened_bridges: vec![( + Location::new(1, [Parachain(1004)]), + Junctions::from([GlobalConsensus(NetworkId::PolkadotBulletin).into()]), + Some(bp_messages::LegacyLaneId([0, 0, 0, 0])), + )], + }, xcm_over_bridge_hub_westend: XcmOverBridgeHubWestendConfig { opened_bridges }, ethereum_system: EthereumSystemConfig { para_id: id, asset_hub_para_id }, }) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index ff7af475f5e2..d87ff9b43fef 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -169,7 +169,6 @@ pub type Migrations = ( bridge_to_westend_config::WithBridgeHubWestendMessagesInstance, >, bridge_to_westend_config::migration::StaticToDynamicLanes, - bridge_to_bulletin_config::migration::StaticToDynamicLanes, frame_support::migrations::RemoveStorage< BridgeWestendMessagesPalletName, OutboundLanesCongestedSignalsKey, @@ -537,6 +536,7 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = ConstU32<100>; type WeightInfo = weights::pallet_multisig::WeightInfo; + type BlockNumberProvider = frame_system::Pallet; } impl pallet_utility::Config for Runtime { @@ -846,7 +846,8 @@ impl_runtime_apis! 
{
 }
 fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result<u128, XcmPaymentApiError> {
- match asset.try_as::<AssetId>() {
+ let latest_asset_id: Result<AssetId, ()> = asset.clone().try_into();
+ match latest_asset_id {
 Ok(asset_id) if asset_id.0 == xcm_config::TokenLocation::get() => {
 // for native token
 Ok(WeightToFee::weight_to_fee(&weight))
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs
index 288aac38563c..efc2798999bf 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs
@@ -22,6 +22,7 @@ use codec::Encode;
 use frame_support::weights::Weight;
 use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight;
 use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric;
+use sp_runtime::BoundedVec;
 use xcm::{
 latest::{prelude::*, AssetTransferFilter},
 DoubleEncoded,
@@ -84,7 +85,11 @@ impl<Call> XcmWeightInfo<Call> for BridgeHubRococoXcmWeight<Call> {
 fn transfer_reserve_asset(assets: &Assets, _dest: &Location, _xcm: &Xcm<()>) -> Weight {
 assets.weigh_assets(XcmFungibleWeight::<Runtime>::transfer_reserve_asset())
 }
- fn transact(_origin_type: &OriginKind, _call: &DoubleEncoded<Call>) -> Weight {
+ fn transact(
+ _origin_type: &OriginKind,
+ _fallback_max_weight: &Option<Weight>,
+ _call: &DoubleEncoded<Call>,
+ ) -> Weight {
 XcmGeneric::<Runtime>::transact()
 }
 fn hrmp_new_channel_open_request(
@@ -253,8 +258,16 @@ impl<Call> XcmWeightInfo<Call> for BridgeHubRococoXcmWeight<Call> {
 fn unpaid_execution(_: &WeightLimit, _: &Option<Location>) -> Weight {
 XcmGeneric::<Runtime>::unpaid_execution()
 }
- fn set_asset_claimer(_location: &Location) -> Weight {
- XcmGeneric::<Runtime>::set_asset_claimer()
+ fn set_hints(hints: &BoundedVec<Hint, HintNumVariants>) -> Weight {
+ let mut weight = Weight::zero();
+ for hint in hints {
+ match hint {
+ AssetClaimer { .. } => {
+ weight = weight.saturating_add(XcmGeneric::<Runtime>::asset_claimer());
+ },
+ }
+ }
+ weight
 }
 fn execute_with_origin(_: &Option<InteriorLocation>, _: &Xcm<Call>) -> Weight {
 XcmGeneric::<Runtime>::execute_with_origin()
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs
index bac73e0e0567..daf22190a42b 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs
@@ -373,7 +373,7 @@ impl<T: frame_system::Config> WeightInfo<T> {
 // Minimum execution time: 1_085_000 picoseconds.
Weight::from_parts(1_161_000, 0) } - pub fn set_asset_claimer() -> Weight { + pub fn asset_claimer() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs index 8be2993c68f4..d5baa1c71dfd 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs @@ -29,7 +29,7 @@ use frame_support::parameter_types; use parachains_common::{AccountId, AuraId, Balance}; use snowbridge_pallet_ethereum_client::WeightInfo; use sp_core::H160; -use sp_keyring::AccountKeyring::Alice; +use sp_keyring::Sr25519Keyring::Alice; use sp_runtime::{ generic::{Era, SignedPayload}, AccountId32, @@ -166,7 +166,7 @@ pub fn ethereum_outbound_queue_processes_messages_before_message_queue_works() { } fn construct_extrinsic( - sender: sp_keyring::AccountKeyring, + sender: sp_keyring::Sr25519Keyring, call: RuntimeCall, ) -> UncheckedExtrinsic { let account_id = AccountId32::from(sender.public()); @@ -192,7 +192,7 @@ fn construct_extrinsic( } fn construct_and_apply_extrinsic( - origin: sp_keyring::AccountKeyring, + origin: sp_keyring::Sr25519Keyring, call: RuntimeCall, ) -> sp_runtime::DispatchOutcome { let xt = construct_extrinsic(origin, call); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs index 2e7dd98e9dce..8d74b221a609 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs @@ -20,9 +20,9 @@ use bp_polkadot_core::Signature; use bridge_hub_rococo_runtime::{ bridge_common_config, bridge_to_bulletin_config, bridge_to_westend_config, xcm_config::{RelayNetwork, TokenLocation, XcmConfig}, - AllPalletsWithoutSystem, BridgeRejectObsoleteHeadersAndMessages, Executive, ExistentialDeposit, - ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, SessionKeys, - TransactionPayment, TxExtension, UncheckedExtrinsic, + AllPalletsWithoutSystem, Block, BridgeRejectObsoleteHeadersAndMessages, Executive, + ExistentialDeposit, ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, + RuntimeOrigin, SessionKeys, TransactionPayment, TxExtension, UncheckedExtrinsic, }; use bridge_hub_test_utils::SlotDurations; use codec::{Decode, Encode}; @@ -31,7 +31,7 @@ use parachains_common::{AccountId, AuraId, Balance}; use snowbridge_core::ChannelId; use sp_consensus_aura::SlotDuration; use sp_core::{crypto::Ss58Codec, H160}; -use sp_keyring::AccountKeyring::Alice; +use sp_keyring::Sr25519Keyring::Alice; use sp_runtime::{ generic::{Era, SignedPayload}, AccountId32, Perbill, @@ -45,7 +45,7 @@ parameter_types! 
{ } fn construct_extrinsic( - sender: sp_keyring::AccountKeyring, + sender: sp_keyring::Sr25519Keyring, call: RuntimeCall, ) -> UncheckedExtrinsic { let account_id = AccountId32::from(sender.public()); @@ -72,7 +72,7 @@ fn construct_extrinsic( } fn construct_and_apply_extrinsic( - relayer_at_target: sp_keyring::AccountKeyring, + relayer_at_target: sp_keyring::Sr25519Keyring, call: RuntimeCall, ) -> sp_runtime::DispatchOutcome { let xt = construct_extrinsic(relayer_at_target, call); @@ -324,11 +324,12 @@ mod bridge_hub_westend_tests { >( SiblingParachainLocation::get(), BridgedUniversalLocation::get(), - |locations, fee| { + false, + |locations, _fee| { bridge_hub_test_utils::open_bridge_with_storage::< Runtime, XcmOverBridgeHubWestendInstance - >(locations, fee, LegacyLaneId([0, 0, 0, 1])) + >(locations, LegacyLaneId([0, 0, 0, 1])) } ).1 }, @@ -388,11 +389,12 @@ mod bridge_hub_westend_tests { >( SiblingParachainLocation::get(), BridgedUniversalLocation::get(), - |locations, fee| { + false, + |locations, _fee| { bridge_hub_test_utils::open_bridge_with_storage::< Runtime, XcmOverBridgeHubWestendInstance, - >(locations, fee, LegacyLaneId([0, 0, 0, 1])) + >(locations, LegacyLaneId([0, 0, 0, 1])) }, ) .1 @@ -422,11 +424,12 @@ mod bridge_hub_westend_tests { >( SiblingParachainLocation::get(), BridgedUniversalLocation::get(), - |locations, fee| { + false, + |locations, _fee| { bridge_hub_test_utils::open_bridge_with_storage::< Runtime, XcmOverBridgeHubWestendInstance, - >(locations, fee, LegacyLaneId([0, 0, 0, 1])) + >(locations, LegacyLaneId([0, 0, 0, 1])) }, ) .1 @@ -498,10 +501,10 @@ mod bridge_hub_westend_tests { mod bridge_hub_bulletin_tests { use super::*; - use bp_messages::{HashedLaneId, LaneIdType}; + use bp_messages::LegacyLaneId; use bridge_common_config::BridgeGrandpaRococoBulletinInstance; use bridge_hub_rococo_runtime::{ - bridge_common_config::RelayersForPermissionlessLanesInstance, + bridge_common_config::RelayersForLegacyLaneIdsMessagesInstance, xcm_config::LocationToAccountId, }; use bridge_hub_test_utils::test_cases::from_grandpa_chain; @@ -525,7 +528,7 @@ mod bridge_hub_bulletin_tests { AllPalletsWithoutSystem, BridgeGrandpaRococoBulletinInstance, WithRococoBulletinMessagesInstance, - RelayersForPermissionlessLanesInstance, + RelayersForLegacyLaneIdsMessagesInstance, >; #[test] @@ -591,11 +594,12 @@ mod bridge_hub_bulletin_tests { >( SiblingPeopleParachainLocation::get(), BridgedBulletinLocation::get(), - |locations, fee| { + false, + |locations, _fee| { bridge_hub_test_utils::open_bridge_with_storage::< Runtime, XcmOverPolkadotBulletinInstance - >(locations, fee, HashedLaneId::try_new(1, 2).unwrap()) + >(locations, LegacyLaneId([0, 0, 0, 0])) } ).1 }, @@ -654,11 +658,12 @@ mod bridge_hub_bulletin_tests { >( SiblingPeopleParachainLocation::get(), BridgedBulletinLocation::get(), - |locations, fee| { + false, + |locations, _fee| { bridge_hub_test_utils::open_bridge_with_storage::< Runtime, XcmOverPolkadotBulletinInstance, - >(locations, fee, HashedLaneId::try_new(1, 2).unwrap()) + >(locations, LegacyLaneId([0, 0, 0, 0])) }, ) .1 @@ -687,11 +692,12 @@ mod bridge_hub_bulletin_tests { >( SiblingPeopleParachainLocation::get(), BridgedBulletinLocation::get(), - |locations, fee| { + false, + |locations, _fee| { bridge_hub_test_utils::open_bridge_with_storage::< Runtime, XcmOverPolkadotBulletinInstance, - >(locations, fee, HashedLaneId::try_new(1, 2).unwrap()) + >(locations, LegacyLaneId([0, 0, 0, 0])) }, ) .1 @@ -832,3 +838,13 @@ fn location_conversion_works() { assert_eq!(got, 
expected, "{}", tc.description); } } + +#[test] +fn xcm_payment_api_works() { + parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< + Runtime, + RuntimeCall, + RuntimeOrigin, + Block, + >(); +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml index 637e7c710640..efdd0abbb8ee 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Westend's BridgeHub parachain runtime" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -95,6 +97,7 @@ bp-relayers = { workspace = true } bp-runtime = { workspace = true } bp-rococo = { workspace = true } bp-westend = { workspace = true } +bp-xcm-bridge-hub-router = { workspace = true } pallet-bridge-grandpa = { workspace = true } pallet-bridge-messages = { workspace = true } pallet-bridge-parachains = { workspace = true } @@ -121,6 +124,7 @@ bridge-hub-test-utils = { workspace = true, default-features = true } bridge-runtime-common = { features = ["integrity-test"], workspace = true, default-features = true } pallet-bridge-relayers = { features = ["integrity-test"], workspace = true } snowbridge-runtime-test-common = { workspace = true, default-features = true } +parachains-runtimes-test-utils = { workspace = true, default-features = true } [features] default = ["std"] @@ -137,6 +141,7 @@ std = [ "bp-rococo/std", "bp-runtime/std", "bp-westend/std", + "bp-xcm-bridge-hub-router/std", "bridge-hub-common/std", "bridge-runtime-common/std", "codec/std", @@ -258,6 +263,7 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs index 62c93da7c831..24e5482b7b09 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs @@ -21,7 +21,7 @@ use crate::{ weights, xcm_config::UniversalLocation, AccountId, Balance, Balances, BridgeRococoMessages, PolkadotXcm, Runtime, RuntimeEvent, - RuntimeHoldReason, XcmOverBridgeHubRococo, XcmRouter, + RuntimeHoldReason, XcmOverBridgeHubRococo, XcmRouter, XcmpQueue, }; use bp_messages::{ source_chain::FromBridgedChainMessagesDeliveryProof, @@ -29,7 +29,7 @@ use bp_messages::{ }; use bp_parachains::SingleParaStoredHeaderDataBuilder; use bridge_hub_common::xcm_version::XcmVersionOfDestAndRemoteBridge; -use pallet_xcm_bridge_hub::XcmAsPlainPayload; +use pallet_xcm_bridge_hub::{BridgeId, XcmAsPlainPayload}; use frame_support::{ parameter_types, @@ -152,6 +152,7 @@ impl pallet_bridge_messages::Config for Run type DeliveryConfirmationPayments = pallet_bridge_relayers::DeliveryConfirmationPaymentsAdapter< Runtime, WithBridgeHubRococoMessagesInstance, + RelayersForLegacyLaneIdsMessagesInstance, DeliveryRewardInBalance, >; @@ -185,11 +186,46 @@ impl pallet_xcm_bridge_hub::Config for Runtime { type AllowWithoutBridgeDeposit = RelayOrOtherSystemParachains; - // TODO:(bridges-v2) - add `LocalXcmChannelManager` impl - 
https://github.com/paritytech/parity-bridges-common/issues/3047 - type LocalXcmChannelManager = (); + type LocalXcmChannelManager = CongestionManager; type BlobDispatcher = FromRococoMessageBlobDispatcher; } +/// Implementation of `bp_xcm_bridge_hub::LocalXcmChannelManager` for congestion management. +pub struct CongestionManager; +impl pallet_xcm_bridge_hub::LocalXcmChannelManager for CongestionManager { + type Error = SendError; + + fn is_congested(with: &Location) -> bool { + // This is used to check the inbound bridge queue/messages to determine if they can be + // dispatched and sent to the sibling parachain. Therefore, checking outbound `XcmpQueue` + // is sufficient here. + use bp_xcm_bridge_hub_router::XcmChannelStatusProvider; + cumulus_pallet_xcmp_queue::bridging::OutXcmpChannelStatusProvider::::is_congested( + with, + ) + } + + fn suspend_bridge(local_origin: &Location, bridge: BridgeId) -> Result<(), Self::Error> { + // This bridge is intended for AH<>AH communication with a hard-coded/static lane, + // so `local_origin` is expected to represent only the local AH. + send_xcm::( + local_origin.clone(), + bp_asset_hub_westend::build_congestion_message(bridge.inner(), true).into(), + ) + .map(|_| ()) + } + + fn resume_bridge(local_origin: &Location, bridge: BridgeId) -> Result<(), Self::Error> { + // This bridge is intended for AH<>AH communication with a hard-coded/static lane, + // so `local_origin` is expected to represent only the local AH. + send_xcm::( + local_origin.clone(), + bp_asset_hub_westend::build_congestion_message(bridge.inner(), false).into(), + ) + .map(|_| ()) + } +} + #[cfg(feature = "runtime-benchmarks")] pub(crate) fn open_bridge_for_benchmarks( with: pallet_xcm_bridge_hub::LaneIdOf, @@ -284,7 +320,6 @@ mod tests { fn ensure_bridge_integrity() { assert_complete_bridge_types!( runtime: Runtime, - with_bridged_chain_grandpa_instance: BridgeGrandpaRococoInstance, with_bridged_chain_messages_instance: WithBridgeHubRococoMessagesInstance, this_chain: bp_bridge_hub_westend::BridgeHubWestend, bridged_chain: bp_bridge_hub_rococo::BridgeHubRococo, @@ -294,7 +329,6 @@ mod tests { Runtime, BridgeGrandpaRococoInstance, WithBridgeHubRococoMessagesInstance, - bp_rococo::Rococo, >(AssertCompleteBridgeConstants { this_chain_constants: AssertChainConstants { block_length: bp_bridge_hub_westend::BlockLength::get(), diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 065400016791..ae3dbfa06cba 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -513,6 +513,7 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = ConstU32<100>; type WeightInfo = weights::pallet_multisig::WeightInfo; + type BlockNumberProvider = frame_system::Pallet; } impl pallet_utility::Config for Runtime { @@ -778,7 +779,8 @@ impl_runtime_apis! 
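The `CongestionManager` hunk above replaces the `type LocalXcmChannelManager = ()` stub: congestion is read from the outbound XCMP channel status, and suspend/resume are reported to the sibling Asset Hub via `build_congestion_message`. A dependency-free sketch of the trait contract follows; the trait, `BridgeId`, and the in-memory `SUSPENDED` set are simplified stand-ins, not the real `pallet_xcm_bridge_hub` types.

```rust
use std::collections::HashSet;
use std::sync::Mutex;

type Location = String; // stand-in for `xcm::latest::Location`
type BridgeId = [u8; 4]; // stand-in for `pallet_xcm_bridge_hub::BridgeId`

trait LocalXcmChannelManager {
    type Error;
    /// Is the channel with `with` currently congested?
    fn is_congested(with: &Location) -> bool;
    /// Ask the sending side to stop using this bridge.
    fn suspend_bridge(local_origin: &Location, bridge: BridgeId) -> Result<(), Self::Error>;
    /// Ask the sending side to resume using this bridge.
    fn resume_bridge(local_origin: &Location, bridge: BridgeId) -> Result<(), Self::Error>;
}

static SUSPENDED: Mutex<Option<HashSet<BridgeId>>> = Mutex::new(None);

struct MockCongestionManager;
impl LocalXcmChannelManager for MockCongestionManager {
    type Error = ();
    fn is_congested(_with: &Location) -> bool {
        // The runtime version delegates to the outbound XCMP channel status
        // provider; the mock just reports "not congested".
        false
    }
    fn suspend_bridge(_local_origin: &Location, bridge: BridgeId) -> Result<(), Self::Error> {
        // The runtime version sends a congestion-report XCM to the sibling
        // Asset Hub; the mock records the suspension locally instead.
        SUSPENDED.lock().unwrap().get_or_insert_with(HashSet::new).insert(bridge);
        Ok(())
    }
    fn resume_bridge(_local_origin: &Location, bridge: BridgeId) -> Result<(), Self::Error> {
        SUSPENDED.lock().unwrap().get_or_insert_with(HashSet::new).remove(&bridge);
        Ok(())
    }
}

fn main() {
    let asset_hub: Location = "sibling Asset Hub".into();
    assert!(!MockCongestionManager::is_congested(&asset_hub));
    MockCongestionManager::suspend_bridge(&asset_hub, *b"lane").unwrap();
    MockCongestionManager::resume_bridge(&asset_hub, *b"lane").unwrap();
}
```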
{ } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { - match asset.try_as::() { + let latest_asset_id: Result = asset.clone().try_into(); + match latest_asset_id { Ok(asset_id) if asset_id.0 == xcm_config::WestendLocation::get() => { // for native token Ok(WeightToFee::weight_to_fee(&weight)) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/mod.rs index fa1304d11c6f..15a1dae09d9b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/mod.rs @@ -23,6 +23,7 @@ use codec::Encode; use frame_support::weights::Weight; use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight; use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; +use sp_runtime::BoundedVec; use xcm::{ latest::{prelude::*, AssetTransferFilter}, DoubleEncoded, @@ -85,7 +86,11 @@ impl XcmWeightInfo for BridgeHubWestendXcmWeight { fn transfer_reserve_asset(assets: &Assets, _dest: &Location, _xcm: &Xcm<()>) -> Weight { assets.weigh_assets(XcmFungibleWeight::::transfer_reserve_asset()) } - fn transact(_origin_type: &OriginKind, _call: &DoubleEncoded) -> Weight { + fn transact( + _origin_type: &OriginKind, + _fallback_max_weight: &Option, + _call: &DoubleEncoded, + ) -> Weight { XcmGeneric::::transact() } fn hrmp_new_channel_open_request( @@ -174,8 +179,16 @@ impl XcmWeightInfo for BridgeHubWestendXcmWeight { fn clear_error() -> Weight { XcmGeneric::::clear_error() } - fn set_asset_claimer(_location: &Location) -> Weight { - XcmGeneric::::set_asset_claimer() + fn set_hints(hints: &BoundedVec) -> Weight { + let mut weight = Weight::zero(); + for hint in hints { + match hint { + AssetClaimer { .. } => { + weight = weight.saturating_add(XcmGeneric::::asset_claimer()); + }, + } + } + weight } fn claim_asset(_assets: &Assets, _ticket: &Location) -> Weight { XcmGeneric::::claim_asset() diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 6434f6206fbe..03cbaa866ad8 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -373,7 +373,7 @@ impl WeightInfo { // Minimum execution time: 995_000 picoseconds. 
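The `query_weight_to_asset_fee` hunk above swaps `asset.try_as::<AssetId>()`, which only matched one fixed version, for a `try_into()` conversion to the latest XCM version, so ids supplied in older encodings are upgraded before matching against the native location. A minimal model of that pattern, with stand-in types for `VersionedAssetId`, `AssetId`, and the fee calculation:

```rust
#[derive(Clone, Debug, PartialEq)]
enum VersionedAssetId {
    V4(String),
    V5(String),
}

/// Stand-in for the latest (`V5`) asset id type.
#[derive(Debug, PartialEq)]
struct AssetId(String);

#[derive(Debug)]
struct UnsupportedVersion;

impl TryFrom<VersionedAssetId> for AssetId {
    type Error = UnsupportedVersion;
    fn try_from(value: VersionedAssetId) -> Result<Self, Self::Error> {
        match value {
            // A real conversion re-encodes V4 into V5; the payloads coincide here.
            VersionedAssetId::V4(id) | VersionedAssetId::V5(id) => Ok(AssetId(id)),
        }
    }
}

fn query_weight_to_asset_fee(
    weight_ref_time: u64,
    asset: VersionedAssetId,
) -> Result<u128, UnsupportedVersion> {
    // `try_into` upgrades the id to the latest version before matching.
    let latest: AssetId = asset.try_into()?;
    if latest == AssetId("native".into()) {
        Ok(weight_ref_time as u128) // stand-in for `WeightToFee::weight_to_fee`
    } else {
        Err(UnsupportedVersion)
    }
}

fn main() {
    // An asset id supplied in the older V4 encoding still yields a fee quote.
    let fee = query_weight_to_asset_fee(7, VersionedAssetId::V4("native".into())).unwrap();
    assert_eq!(fee, 7);
}
```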
Weight::from_parts(1_060_000, 0) } - pub fn set_asset_claimer() -> Weight { + pub fn asset_claimer() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/snowbridge.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/snowbridge.rs index 1a1ce2a28ea3..d71400fa71b6 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/snowbridge.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/snowbridge.rs @@ -30,7 +30,7 @@ use frame_support::parameter_types; use parachains_common::{AccountId, AuraId, Balance}; use snowbridge_pallet_ethereum_client::WeightInfo; use sp_core::H160; -use sp_keyring::AccountKeyring::Alice; +use sp_keyring::Sr25519Keyring::Alice; use sp_runtime::{ generic::{Era, SignedPayload}, AccountId32, @@ -167,7 +167,7 @@ pub fn ethereum_outbound_queue_processes_messages_before_message_queue_works() { } fn construct_extrinsic( - sender: sp_keyring::AccountKeyring, + sender: sp_keyring::Sr25519Keyring, call: RuntimeCall, ) -> UncheckedExtrinsic { let account_id = AccountId32::from(sender.public()); @@ -193,7 +193,7 @@ fn construct_extrinsic( } fn construct_and_apply_extrinsic( - origin: sp_keyring::AccountKeyring, + origin: sp_keyring::Sr25519Keyring, call: RuntimeCall, ) -> sp_runtime::DispatchOutcome { let xt = construct_extrinsic(origin, call); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs index 69301b34fe6b..9d32f28f4fc6 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs @@ -27,9 +27,9 @@ use bridge_hub_westend_runtime::{ bridge_common_config, bridge_to_rococo_config, bridge_to_rococo_config::RococoGlobalConsensusNetwork, xcm_config::{LocationToAccountId, RelayNetwork, WestendLocation, XcmConfig}, - AllPalletsWithoutSystem, BridgeRejectObsoleteHeadersAndMessages, Executive, ExistentialDeposit, - ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, SessionKeys, - TransactionPayment, TxExtension, UncheckedExtrinsic, + AllPalletsWithoutSystem, Block, BridgeRejectObsoleteHeadersAndMessages, Executive, + ExistentialDeposit, ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, + RuntimeOrigin, SessionKeys, TransactionPayment, TxExtension, UncheckedExtrinsic, }; use bridge_to_rococo_config::{ BridgeGrandpaRococoInstance, BridgeHubRococoLocation, BridgeParachainRococoInstance, @@ -40,7 +40,7 @@ use frame_support::{dispatch::GetDispatchInfo, parameter_types, traits::ConstU8} use parachains_common::{AccountId, AuraId, Balance}; use sp_consensus_aura::SlotDuration; use sp_core::crypto::Ss58Codec; -use sp_keyring::AccountKeyring::Alice; +use sp_keyring::Sr25519Keyring::Alice; use sp_runtime::{ generic::{Era, SignedPayload}, AccountId32, Perbill, @@ -77,7 +77,7 @@ parameter_types! 
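The `set_asset_claimer` weigher above becomes a `set_hints` weigher that sums a benchmarked cost per hint in the instruction's bounded list. A self-contained sketch of that accumulation, with stand-ins for `Weight` and `Hint`; the `1_060_000` ref-time figure is borrowed from the weight file above purely for illustration:

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
struct Weight { ref_time: u64, proof_size: u64 }

impl Weight {
    const fn zero() -> Self { Weight { ref_time: 0, proof_size: 0 } }
    const fn from_parts(ref_time: u64, proof_size: u64) -> Self { Weight { ref_time, proof_size } }
    fn saturating_add(self, o: Self) -> Self {
        Weight {
            ref_time: self.ref_time.saturating_add(o.ref_time),
            proof_size: self.proof_size.saturating_add(o.proof_size),
        }
    }
}

enum Hint {
    // Currently the only hint; matching exhaustively means a new variant
    // forces this weigher to be revisited, as in the runtime code above.
    AssetClaimer {},
}

fn set_hints(hints: &[Hint]) -> Weight {
    let mut weight = Weight::zero();
    for hint in hints {
        match hint {
            Hint::AssetClaimer { .. } => {
                // Stand-in for `XcmGeneric::<Runtime>::asset_claimer()`.
                weight = weight.saturating_add(Weight::from_parts(1_060_000, 0));
            },
        }
    }
    weight
}

fn main() {
    assert_eq!(set_hints(&[Hint::AssetClaimer {}]), Weight::from_parts(1_060_000, 0));
}
```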
{ } fn construct_extrinsic( - sender: sp_keyring::AccountKeyring, + sender: sp_keyring::Sr25519Keyring, call: RuntimeCall, ) -> UncheckedExtrinsic { let account_id = AccountId32::from(sender.public()); @@ -104,7 +104,7 @@ fn construct_extrinsic( } fn construct_and_apply_extrinsic( - relayer_at_target: sp_keyring::AccountKeyring, + relayer_at_target: sp_keyring::Sr25519Keyring, call: RuntimeCall, ) -> sp_runtime::DispatchOutcome { let xt = construct_extrinsic(relayer_at_target, call); @@ -246,10 +246,11 @@ fn handle_export_message_from_system_parachain_add_to_outbound_queue_works() { >( SiblingParachainLocation::get(), BridgedUniversalLocation::get(), - |locations, fee| { + false, + |locations, _fee| { bridge_hub_test_utils::open_bridge_with_storage::< Runtime, XcmOverBridgeHubRococoInstance - >(locations, fee, LegacyLaneId([0, 0, 0, 1])) + >(locations, LegacyLaneId([0, 0, 0, 1])) } ).1 }, @@ -307,11 +308,12 @@ fn relayed_incoming_message_works() { >( SiblingParachainLocation::get(), BridgedUniversalLocation::get(), - |locations, fee| { + false, + |locations, _fee| { bridge_hub_test_utils::open_bridge_with_storage::< Runtime, XcmOverBridgeHubRococoInstance, - >(locations, fee, LegacyLaneId([0, 0, 0, 1])) + >(locations, LegacyLaneId([0, 0, 0, 1])) }, ) .1 @@ -341,11 +343,12 @@ fn free_relay_extrinsic_works() { >( SiblingParachainLocation::get(), BridgedUniversalLocation::get(), - |locations, fee| { + false, + |locations, _fee| { bridge_hub_test_utils::open_bridge_with_storage::< Runtime, XcmOverBridgeHubRococoInstance, - >(locations, fee, LegacyLaneId([0, 0, 0, 1])) + >(locations, LegacyLaneId([0, 0, 0, 1])) }, ) .1 @@ -522,3 +525,13 @@ fn location_conversion_works() { assert_eq!(got, expected, "{}", tc.description); } } + +#[test] +fn xcm_payment_api_works() { + parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< + Runtime, + RuntimeCall, + RuntimeOrigin, + Block, + >(); +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml index 9cb24a2b2820..9eacb27639a3 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Bridge hub common utilities" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [dependencies] codec = { features = ["derive"], workspace = true } @@ -39,4 +41,5 @@ runtime-benchmarks = [ "pallet-message-queue/runtime-benchmarks", "snowbridge-core/runtime-benchmarks", "sp-runtime/runtime-benchmarks", + "xcm/runtime-benchmarks", ] diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml index 915b3090092f..16fef951f328 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Utils for BridgeHub testing" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_grandpa_chain.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_grandpa_chain.rs index 320f3030b60a..358c184c815d 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_grandpa_chain.rs +++ 
b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_grandpa_chain.rs @@ -34,7 +34,7 @@ use parachains_runtimes_test_utils::{ AccountIdOf, BasicParachainRuntime, CollatorSessionKeys, RuntimeCallOf, SlotDurations, }; use sp_core::Get; -use sp_keyring::AccountKeyring::*; +use sp_keyring::Sr25519Keyring::*; use sp_runtime::{traits::Header as HeaderT, AccountId32}; use xcm::latest::prelude::*; @@ -103,7 +103,7 @@ pub fn relayed_incoming_message_works( local_relay_chain_id: NetworkId, prepare_configuration: impl Fn() -> LaneIdOf, construct_and_apply_extrinsic: fn( - sp_keyring::AccountKeyring, + sp_keyring::Sr25519Keyring, RuntimeCallOf, ) -> sp_runtime::DispatchOutcome, expect_rewards: bool, @@ -210,7 +210,7 @@ pub fn free_relay_extrinsic_works( local_relay_chain_id: NetworkId, prepare_configuration: impl Fn() -> LaneIdOf, construct_and_apply_extrinsic: fn( - sp_keyring::AccountKeyring, + sp_keyring::Sr25519Keyring, RuntimeCallOf, ) -> sp_runtime::DispatchOutcome, expect_rewards: bool, @@ -344,7 +344,7 @@ pub fn complex_relay_extrinsic_works( local_relay_chain_id: NetworkId, prepare_configuration: impl Fn() -> LaneIdOf, construct_and_apply_extrinsic: fn( - sp_keyring::AccountKeyring, + sp_keyring::Sr25519Keyring, RuntimeCallOf, ) -> sp_runtime::DispatchOutcome, ) where diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_parachain.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_parachain.rs index 1da901e0bcdf..d8fff55b4b50 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_parachain.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_parachain.rs @@ -36,7 +36,7 @@ use parachains_runtimes_test_utils::{ AccountIdOf, BasicParachainRuntime, CollatorSessionKeys, RuntimeCallOf, SlotDurations, }; use sp_core::Get; -use sp_keyring::AccountKeyring::*; +use sp_keyring::Sr25519Keyring::*; use sp_runtime::{traits::Header as HeaderT, AccountId32}; use xcm::latest::prelude::*; @@ -112,7 +112,7 @@ pub fn relayed_incoming_message_works( local_relay_chain_id: NetworkId, prepare_configuration: impl Fn() -> LaneIdOf, construct_and_apply_extrinsic: fn( - sp_keyring::AccountKeyring, + sp_keyring::Sr25519Keyring, ::RuntimeCall, ) -> sp_runtime::DispatchOutcome, expect_rewards: bool, @@ -246,7 +246,7 @@ pub fn free_relay_extrinsic_works( local_relay_chain_id: NetworkId, prepare_configuration: impl Fn() -> LaneIdOf, construct_and_apply_extrinsic: fn( - sp_keyring::AccountKeyring, + sp_keyring::Sr25519Keyring, ::RuntimeCall, ) -> sp_runtime::DispatchOutcome, expect_rewards: bool, @@ -414,7 +414,7 @@ pub fn complex_relay_extrinsic_works( local_relay_chain_id: NetworkId, prepare_configuration: impl Fn() -> LaneIdOf, construct_and_apply_extrinsic: fn( - sp_keyring::AccountKeyring, + sp_keyring::Sr25519Keyring, ::RuntimeCall, ) -> sp_runtime::DispatchOutcome, ) where diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs index aac60bba0b53..a99bda5bfdf4 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs @@ -29,7 +29,7 @@ use core::marker::PhantomData; use frame_support::{ assert_ok, dispatch::GetDispatchInfo, - traits::{fungible::Mutate, OnFinalize, OnInitialize, PalletInfoAccess}, + traits::{fungible::Mutate, Contains, OnFinalize, OnInitialize, 
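The `AccountKeyring` to `Sr25519Keyring` change repeated across these test files is a rename of the same `sp-keyring` type. A minimal sketch of the signing pattern the `construct_extrinsic` helpers build on, assuming the workspace's `sp-core`, `sp-keyring`, and `sp-runtime` crates; the payload here is a placeholder rather than a real `SignedPayload`:

```rust
use sp_core::{sr25519, Pair};
use sp_keyring::Sr25519Keyring;
use sp_runtime::AccountId32;

fn main() {
    let sender = Sr25519Keyring::Alice;

    // Derive the on-chain account id from the keypair, as the test helpers do.
    let account_id = AccountId32::from(sender.public());

    // Sign an arbitrary payload; the real helpers sign a `SignedPayload`.
    let payload = b"example payload";
    let signature = sender.sign(payload);
    assert!(sr25519::Pair::verify(&signature, payload, &sender.public()));

    println!("signed as {account_id}");
}
```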
PalletInfoAccess}, }; use frame_system::pallet_prelude::BlockNumberFor; use pallet_bridge_grandpa::{BridgedBlockHash, BridgedHeader}; @@ -39,7 +39,7 @@ use parachains_runtimes_test_utils::{ mock_open_hrmp_channel, AccountIdOf, CollatorSessionKeys, RuntimeCallOf, SlotDurations, }; use sp_core::Get; -use sp_keyring::AccountKeyring::*; +use sp_keyring::Sr25519Keyring::*; use sp_runtime::{traits::TrailingZeroInput, AccountId32}; use xcm::latest::prelude::*; use xcm_executor::traits::ConvertLocation; @@ -264,7 +264,7 @@ pub fn relayed_incoming_message_works( sibling_parachain_id: u32, local_relay_chain_id: NetworkId, construct_and_apply_extrinsic: fn( - sp_keyring::AccountKeyring, + sp_keyring::Sr25519Keyring, RuntimeCallOf, ) -> sp_runtime::DispatchOutcome, prepare_message_proof_import: impl FnOnce( @@ -374,9 +374,9 @@ pub fn relayed_incoming_message_works( /// Execute every call and verify its outcome. fn execute_and_verify_calls( - submitter: sp_keyring::AccountKeyring, + submitter: sp_keyring::Sr25519Keyring, construct_and_apply_extrinsic: fn( - sp_keyring::AccountKeyring, + sp_keyring::Sr25519Keyring, RuntimeCallOf, ) -> sp_runtime::DispatchOutcome, calls_and_verifiers: CallsAndVerifiers, @@ -395,7 +395,7 @@ pub fn ensure_opened_bridge< XcmOverBridgePalletInstance, LocationToAccountId, TokenLocation> -(source: Location, destination: InteriorLocation, bridge_opener: impl Fn(BridgeLocations, Asset)) -> (BridgeLocations, pallet_xcm_bridge_hub::LaneIdOf) +(source: Location, destination: InteriorLocation, is_paid_xcm_execution: bool, bridge_opener: impl Fn(BridgeLocations, Option)) -> (BridgeLocations, pallet_xcm_bridge_hub::LaneIdOf) where Runtime: BasicParachainRuntime + BridgeXcmOverBridgeConfig, XcmOverBridgePalletInstance: 'static, @@ -416,24 +416,37 @@ TokenLocation: Get{ ) .is_none()); - // required balance: ED + fee + BridgeDeposit - let bridge_deposit = - >::BridgeDeposit::get( - ); - // random high enough value for `BuyExecution` fees - let buy_execution_fee_amount = 5_000_000_000_000_u128; - let buy_execution_fee = (TokenLocation::get(), buy_execution_fee_amount).into(); - let balance_needed = ::ExistentialDeposit::get() + - buy_execution_fee_amount.into() + - bridge_deposit.into(); - // SA of source location needs to have some required balance - let source_account_id = LocationToAccountId::convert_location(&source).expect("valid location"); - let _ = >::mint_into(&source_account_id, balance_needed) - .expect("mint_into passes"); + if !>::AllowWithoutBridgeDeposit::contains(&source) { + // required balance: ED + fee + BridgeDeposit + let bridge_deposit = + >::BridgeDeposit::get( + ); + let balance_needed = ::ExistentialDeposit::get() + bridge_deposit.into(); + + let source_account_id = LocationToAccountId::convert_location(&source).expect("valid location"); + let _ = >::mint_into(&source_account_id, balance_needed) + .expect("mint_into passes"); + }; + + let maybe_paid_execution = if is_paid_xcm_execution { + // random high enough value for `BuyExecution` fees + let buy_execution_fee_amount = 5_000_000_000_000_u128; + let buy_execution_fee = (TokenLocation::get(), buy_execution_fee_amount).into(); + + let balance_needed = ::ExistentialDeposit::get() + + buy_execution_fee_amount.into(); + let source_account_id = + LocationToAccountId::convert_location(&source).expect("valid location"); + let _ = >::mint_into(&source_account_id, balance_needed) + .expect("mint_into passes"); + Some(buy_execution_fee) + } else { + None + }; // call the bridge opener - bridge_opener(*locations.clone(), 
buy_execution_fee); + bridge_opener(*locations.clone(), maybe_paid_execution); // check opened bridge let bridge = pallet_xcm_bridge_hub::Bridges::::get( @@ -452,8 +465,9 @@ TokenLocation: Get{ /// Utility for opening bridge with dedicated `pallet_xcm_bridge_hub`'s extrinsic. pub fn open_bridge_with_extrinsic( - locations: BridgeLocations, - buy_execution_fee: Asset, + (origin, origin_kind): (Location, OriginKind), + bridge_destination_universal_location: InteriorLocation, + maybe_paid_execution: Option, ) where Runtime: frame_system::Config + pallet_xcm_bridge_hub::Config @@ -469,15 +483,15 @@ pub fn open_bridge_with_extrinsic( XcmOverBridgePalletInstance, >::open_bridge { bridge_destination_universal_location: Box::new( - locations.bridge_destination_universal_location().clone().into(), + bridge_destination_universal_location.clone().into(), ), }); // execute XCM as source origin would do with `Transact -> Origin::Xcm` - assert_ok!(RuntimeHelper::::execute_as_origin_xcm( - locations.bridge_origin_relative_location().clone(), + assert_ok!(RuntimeHelper::::execute_as_origin( + (origin, origin_kind), open_bridge_call, - buy_execution_fee + maybe_paid_execution ) .ensure_complete()); } @@ -486,7 +500,6 @@ pub fn open_bridge_with_extrinsic( /// purposes). pub fn open_bridge_with_storage( locations: BridgeLocations, - _buy_execution_fee: Asset, lane_id: pallet_xcm_bridge_hub::LaneIdOf, ) where Runtime: pallet_xcm_bridge_hub::Config, @@ -503,8 +516,12 @@ pub fn open_bridge_with_storage( } /// Helper function to close the bridge/lane for `source` and `destination`. -pub fn close_bridge(source: Location, destination: InteriorLocation) -where +pub fn close_bridge( + expected_source: Location, + bridge_destination_universal_location: InteriorLocation, + (origin, origin_kind): (Location, OriginKind), + is_paid_xcm_execution: bool +) where Runtime: BasicParachainRuntime + BridgeXcmOverBridgeConfig, XcmOverBridgePalletInstance: 'static, ::RuntimeCall: GetDispatchInfo + From>, @@ -515,8 +532,8 @@ TokenLocation: Get{ // construct expected bridge configuration let locations = pallet_xcm_bridge_hub::Pallet::::bridge_locations( - source.clone().into(), - destination.clone().into(), + expected_source.clone().into(), + bridge_destination_universal_location.clone().into(), ) .expect("valid bridge locations"); assert!(pallet_xcm_bridge_hub::Bridges::::get( @@ -525,35 +542,38 @@ TokenLocation: Get{ .is_some()); // required balance: ED + fee + BridgeDeposit - let bridge_deposit = - >::BridgeDeposit::get( - ); - // random high enough value for `BuyExecution` fees - let buy_execution_fee_amount = 2_500_000_000_000_u128; - let buy_execution_fee = (TokenLocation::get(), buy_execution_fee_amount).into(); - let balance_needed = ::ExistentialDeposit::get() + - buy_execution_fee_amount.into() + - bridge_deposit.into(); - - // SA of source location needs to have some required balance - let source_account_id = LocationToAccountId::convert_location(&source).expect("valid location"); - let _ = >::mint_into(&source_account_id, balance_needed) - .expect("mint_into passes"); + let maybe_paid_execution = if is_paid_xcm_execution { + // random high enough value for `BuyExecution` fees + let buy_execution_fee_amount = 2_500_000_000_000_u128; + let buy_execution_fee = (TokenLocation::get(), buy_execution_fee_amount).into(); + + let balance_needed = ::ExistentialDeposit::get() + + buy_execution_fee_amount.into(); + let source_account_id = + LocationToAccountId::convert_location(&expected_source).expect("valid location"); + let _ = 
>::mint_into(&source_account_id, balance_needed) + .expect("mint_into passes"); + Some(buy_execution_fee) + } else { + None + }; // close bridge with `Transact` call let close_bridge_call = RuntimeCallOf::::from(BridgeXcmOverBridgeCall::< Runtime, XcmOverBridgePalletInstance, >::close_bridge { - bridge_destination_universal_location: Box::new(destination.into()), + bridge_destination_universal_location: Box::new( + bridge_destination_universal_location.into(), + ), may_prune_messages: 16, }); // execute XCM as source origin would do with `Transact -> Origin::Xcm` - assert_ok!(RuntimeHelper::::execute_as_origin_xcm( - source.clone(), + assert_ok!(RuntimeHelper::::execute_as_origin( + (origin, origin_kind), close_bridge_call, - buy_execution_fee + maybe_paid_execution ) .ensure_complete()); diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/mod.rs index ad6db0b83e80..f96d0bf405b9 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/mod.rs @@ -654,8 +654,10 @@ where pub fn open_and_close_bridge_works( collator_session_key: CollatorSessionKeys, runtime_para_id: u32, - source: Location, + expected_source: Location, destination: InteriorLocation, + origin_with_origin_kind: (Location, OriginKind), + is_paid_xcm_execution: bool, ) where Runtime: BasicParachainRuntime + BridgeXcmOverBridgeConfig, XcmOverBridgePalletInstance: 'static, @@ -669,7 +671,7 @@ pub fn open_and_close_bridge_works(collator_session_key, runtime_para_id, vec![], || { // construct expected bridge configuration let locations = pallet_xcm_bridge_hub::Pallet::::bridge_locations( - source.clone().into(), + expected_source.clone().into(), destination.clone().into(), ).expect("valid bridge locations"); let expected_lane_id = @@ -704,7 +706,7 @@ pub fn open_and_close_bridge_works( - source.clone(), + expected_source.clone(), destination.clone(), - open_bridge_with_extrinsic:: + is_paid_xcm_execution, + |locations, maybe_paid_execution| open_bridge_with_extrinsic::< + Runtime, + XcmOverBridgePalletInstance, + >( + origin_with_origin_kind.clone(), + locations.bridge_destination_universal_location().clone(), + maybe_paid_execution + ) ) .0 .bridge_id(), @@ -727,7 +737,7 @@ pub fn open_and_close_bridge_works(source.clone(), destination); + >(expected_source, destination, origin_with_origin_kind, is_paid_xcm_execution); // check bridge/lane DOES not exist assert_eq!( diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml index e03fc934ceaf..2e35fe761c04 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license = "Apache-2.0" description = "Westend Collectives Parachain Runtime" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -94,6 +96,7 @@ substrate-wasm-builder = { optional = true, workspace = true, default-features = [dev-dependencies] sp-io = { features = ["std"], workspace = true, default-features = true } +parachains-runtimes-test-utils = { workspace = true, default-features = true } [features] default = ["std"] @@ -135,6 +138,7 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", 
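The bridge test helpers above now take an explicit `is_paid_xcm_execution` flag and hand the opener closure an `Option<Asset>`, funding the source account only when execution must actually be paid for (the storage-based opener needs no fee at all). A dependency-free model of that control flow; the `Asset` type and amounts are illustrative:

```rust
#[derive(Clone, Debug, PartialEq)]
struct Asset { amount: u128 }

fn ensure_opened_bridge(
    is_paid_xcm_execution: bool,
    bridge_opener: impl Fn(Option<Asset>),
) {
    // Mint the `BuyExecution` fee only when requested; the real helper also
    // tops up ED + `BridgeDeposit` unless `AllowWithoutBridgeDeposit` matches.
    let maybe_paid_execution = if is_paid_xcm_execution {
        let buy_execution_fee = Asset { amount: 5_000_000_000_000 };
        // ... mint `ED + fee` into the source's sovereign account here ...
        Some(buy_execution_fee)
    } else {
        None
    };
    bridge_opener(maybe_paid_execution);
}

fn main() {
    // Storage-based opener (tests): no fee asset is threaded through.
    ensure_opened_bridge(false, |fee| assert_eq!(fee, None));
    // Extrinsic-based opener with paid execution: the fee is passed along.
    ensure_opened_bridge(true, |fee| assert!(fee.is_some()));
}
```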
"xcm-runtime-apis/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ "cumulus-pallet-aura-ext/try-runtime", diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index c3e105a84fb6..f4c62f212e8c 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -258,6 +258,7 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = ConstU32<100>; type WeightInfo = weights::pallet_multisig::WeightInfo; + type BlockNumberProvider = frame_system::Pallet; } impl pallet_utility::Config for Runtime { @@ -382,6 +383,7 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; type AnnouncementDepositFactor = AnnouncementDepositFactor; + type BlockNumberProvider = frame_system::Pallet; } parameter_types! { @@ -961,7 +963,8 @@ impl_runtime_apis! { } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { - match asset.try_as::() { + let latest_asset_id: Result = asset.clone().try_into(); + match latest_asset_id { Ok(asset_id) if asset_id.0 == xcm_config::WndLocation::get() => { // for native token Ok(WeightToFee::weight_to_fee(&weight)) diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs index 56ef2e8ba02f..9eb9b85a3918 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs @@ -35,7 +35,8 @@ use polkadot_runtime_common::xcm_sender::ExponentialPrice; use westend_runtime_constants::xcm as xcm_constants; use xcm::latest::{prelude::*, WESTEND_GENESIS_HASH}; use xcm_builder::{ - AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, + AccountId32Aliases, AliasChildLocation, AliasOriginRootUsingFilter, + AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, DenyReserveTransferToRelayChain, DenyThenTry, DescribeAllTerminal, DescribeFamily, EnsureXcmOrigin, FixedWeightBounds, FrameTransactionalProcessor, FungibleAdapter, @@ -191,6 +192,10 @@ pub type WaivedLocations = ( /// - DOT with the parent Relay Chain and sibling parachains. pub type TrustedTeleporters = ConcreteAssetFromSystem; +/// We allow locations to alias into their own child locations, as well as +/// AssetHub to alias into anything. 
+pub type Aliasers = (AliasChildLocation, AliasOriginRootUsingFilter);
+
 pub struct XcmConfig;
 impl xcm_executor::Config for XcmConfig {
 	type RuntimeCall = RuntimeCall;
@@ -227,7 +232,7 @@ impl xcm_executor::Config for XcmConfig {
 	type UniversalAliases = Nothing;
 	type CallDispatcher = RuntimeCall;
 	type SafeCallFilter = Everything;
-	type Aliasers = Nothing;
+	type Aliasers = Aliasers;
 	type TransactionalProcessor = FrameTransactionalProcessor;
 	type HrmpNewChannelOpenRequestHandler = ();
 	type HrmpChannelAcceptedHandler = ();
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/tests/tests.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/tests/tests.rs
index 7add10559d84..c9191eba49f6 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/tests/tests.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/tests/tests.rs
@@ -16,7 +16,9 @@
 
 #![cfg(test)]
 
-use collectives_westend_runtime::xcm_config::LocationToAccountId;
+use collectives_westend_runtime::{
+	xcm_config::LocationToAccountId, Block, Runtime, RuntimeCall, RuntimeOrigin,
+};
 use parachains_common::AccountId;
 use sp_core::crypto::Ss58Codec;
 use xcm::latest::prelude::*;
@@ -132,3 +134,13 @@ fn location_conversion_works() {
 		assert_eq!(got, expected, "{}", tc.description);
 	}
 }
+
+#[test]
+fn xcm_payment_api_works() {
+	parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::<
+		Runtime,
+		RuntimeCall,
+		RuntimeOrigin,
+		Block,
+	>();
+}
diff --git a/cumulus/parachains/runtimes/constants/Cargo.toml b/cumulus/parachains/runtimes/constants/Cargo.toml
index d54f1e7db6c1..01b023e0fb89 100644
--- a/cumulus/parachains/runtimes/constants/Cargo.toml
+++ b/cumulus/parachains/runtimes/constants/Cargo.toml
@@ -5,6 +5,8 @@ authors.workspace = true
 edition.workspace = true
 description = "Common constants for Testnet Parachains runtimes"
 license = "Apache-2.0"
+homepage.workspace = true
+repository.workspace = true
 
 [lints]
 workspace = true
diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml
index c98ca7ba3e74..260c748819ae 100644
--- a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml
+++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml
@@ -5,6 +5,8 @@ description = "Parachain testnet runtime for FRAME Contracts pallet."
authors.workspace = true edition.workspace = true license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -171,6 +173,7 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index f661a8bdccfe..2951662a979b 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -268,6 +268,7 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = ConstU32<100>; type WeightInfo = pallet_multisig::weights::SubstrateWeight; + type BlockNumberProvider = frame_system::Pallet; } impl pallet_utility::Config for Runtime { diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml index a38b7400cfa3..aa692c3c7e74 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Rococo's Coretime parachain runtime" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -80,6 +82,9 @@ parachain-info = { workspace = true } parachains-common = { workspace = true } testnet-parachains-constants = { features = ["rococo"], workspace = true } +[dev-dependencies] +parachains-runtimes-test-utils = { workspace = true } + [features] default = ["std"] std = [ @@ -120,6 +125,7 @@ std = [ "pallet-xcm/std", "parachain-info/std", "parachains-common/std", + "parachains-runtimes-test-utils/std", "polkadot-parachain-primitives/std", "polkadot-runtime-common/std", "rococo-runtime-constants/std", @@ -174,6 +180,7 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs index d76ac443a147..35c3dd8836a8 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs @@ -135,6 +135,7 @@ impl CoretimeInterface for CoretimeAllocator { Instruction::Transact { origin_kind: OriginKind::Native, call: request_core_count_call.encode().into(), + fallback_max_weight: Some(Weight::from_parts(1_000_000_000, 200_000)), }, ]); @@ -164,6 +165,7 @@ impl CoretimeInterface for CoretimeAllocator { Instruction::Transact { origin_kind: OriginKind::Native, call: request_revenue_info_at_call.encode().into(), + fallback_max_weight: Some(Weight::from_parts(1_000_000_000, 200_000)), }, ]); @@ -192,6 +194,7 @@ impl CoretimeInterface for CoretimeAllocator { Instruction::Transact { origin_kind: OriginKind::Native, call: credit_account_call.encode().into(), + fallback_max_weight: Some(Weight::from_parts(1_000_000_000, 200_000)), }, ]); @@ -256,6 +259,7 @@ impl CoretimeInterface for CoretimeAllocator { Instruction::Transact { origin_kind: OriginKind::Native, call: assign_core_call.encode().into(), + fallback_max_weight: 
Some(Weight::from_parts(1_000_000_000, 200_000)), }, ]); diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index 31700c2e25ff..ae3ad93a9e85 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -445,6 +445,7 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = ConstU32<100>; type WeightInfo = weights::pallet_multisig::WeightInfo; + type BlockNumberProvider = frame_system::Pallet; } /// The type used to represent the kinds of proxying allowed. @@ -577,6 +578,7 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; type AnnouncementDepositFactor = AnnouncementDepositFactor; + type BlockNumberProvider = frame_system::Pallet; } impl pallet_utility::Config for Runtime { @@ -833,7 +835,8 @@ impl_runtime_apis! { } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { - match asset.try_as::() { + let latest_asset_id: Result = asset.clone().try_into(); + match latest_asset_id { Ok(asset_id) if asset_id.0 == xcm_config::RocRelayLocation::get() => { // for native token Ok(WeightToFee::weight_to_fee(&weight)) diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs index 35708f22de20..5cb01f62cd26 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_broker` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-06-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-12-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-x5tnzzy-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-acd6uxux-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -54,8 +54,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_024_000 picoseconds. - Weight::from_parts(2_121_000, 0) + // Minimum execution time: 2_250_000 picoseconds. + Weight::from_parts(2_419_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -65,8 +65,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `10888` // Estimated: `13506` - // Minimum execution time: 21_654_000 picoseconds. - Weight::from_parts(22_591_000, 0) + // Minimum execution time: 25_785_000 picoseconds. + Weight::from_parts(26_335_000, 0) .saturating_add(Weight::from_parts(0, 13506)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -77,8 +77,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `12090` // Estimated: `13506` - // Minimum execution time: 20_769_000 picoseconds. - Weight::from_parts(21_328_000, 0) + // Minimum execution time: 24_549_000 picoseconds. 
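The `fallback_max_weight` field added to each `Transact` above only takes effect if the message is later downgraded for a destination on an older XCM version, which still requires an explicit weight bound. A sketch of constructing such a message, assuming the XCMv5-era `xcm` crate from this workspace; the call bytes and weight figures are placeholders:

```rust
use xcm::latest::prelude::*;

/// Build an XCM that transacts `encoded_call` on the destination, carrying a
/// weight bound for destinations that must convert the message down to an
/// older XCM version.
fn transact_with_fallback(encoded_call: Vec<u8>) -> Xcm<()> {
    Xcm(vec![
        Instruction::UnpaidExecution { weight_limit: WeightLimit::Unlimited, check_origin: None },
        Instruction::Transact {
            origin_kind: OriginKind::Native,
            // Ignored on XCMv5 destinations; used as the `require_weight_at_most`
            // style bound when the message is downgraded.
            fallback_max_weight: Some(Weight::from_parts(1_000_000_000, 200_000)),
            call: encoded_call.into(),
        },
    ])
}
```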
+ Weight::from_parts(25_010_000, 0) .saturating_add(Weight::from_parts(0, 13506)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -93,8 +93,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `466` // Estimated: `1951` - // Minimum execution time: 10_404_000 picoseconds. - Weight::from_parts(10_941_000, 0) + // Minimum execution time: 14_135_000 picoseconds. + Weight::from_parts(14_603_000, 0) .saturating_add(Weight::from_parts(0, 1951)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -121,6 +121,8 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::LastRelayChainBlockNumber` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::InstaPoolIo` (r:3 w:3) /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Broker::AutoRenewals` (r:1 w:1) + /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(1002), added: 1497, mode: `MaxEncodedLen`) /// Storage: `Broker::SaleInfo` (r:0 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) /// Storage: `Broker::Status` (r:0 w:1) @@ -132,31 +134,33 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `12599` // Estimated: `15065 + n * (1 ±0)` - // Minimum execution time: 44_085_000 picoseconds. - Weight::from_parts(127_668_002, 0) + // Minimum execution time: 54_087_000 picoseconds. + Weight::from_parts(145_466_213, 0) .saturating_add(Weight::from_parts(0, 15065)) - // Standard Error: 2_231 - .saturating_add(Weight::from_parts(20_604, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(13)) - .saturating_add(T::DbWeight::get().writes(59)) + // Standard Error: 2_407 + .saturating_add(Weight::from_parts(20_971, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(14)) + .saturating_add(T::DbWeight::get().writes(60)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::SaleInfo` (r:1 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) + /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) + /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Broker::Regions` (r:0 w:1) /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn purchase() -> Weight { // Proof Size summary in bytes: - // Measured: `332` + // Measured: `437` // Estimated: `3593` - // Minimum execution time: 45_100_000 picoseconds. - Weight::from_parts(46_263_000, 0) + // Minimum execution time: 58_341_000 picoseconds. 
+ Weight::from_parts(59_505_000, 0) .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Broker::Configuration` (r:1 w:0) @@ -169,16 +173,18 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) + /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::Workplan` (r:0 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn renew() -> Weight { // Proof Size summary in bytes: - // Measured: `553` + // Measured: `658` // Estimated: `4698` - // Minimum execution time: 65_944_000 picoseconds. - Weight::from_parts(68_666_000, 0) + // Minimum execution time: 92_983_000 picoseconds. + Weight::from_parts(99_237_000, 0) .saturating_add(Weight::from_parts(0, 4698)) - .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `Broker::Regions` (r:1 w:1) @@ -187,8 +193,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `358` // Estimated: `3551` - // Minimum execution time: 13_794_000 picoseconds. - Weight::from_parts(14_450_000, 0) + // Minimum execution time: 17_512_000 picoseconds. + Weight::from_parts(18_099_000, 0) .saturating_add(Weight::from_parts(0, 3551)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -199,8 +205,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `358` // Estimated: `3551` - // Minimum execution time: 15_316_000 picoseconds. - Weight::from_parts(15_787_000, 0) + // Minimum execution time: 18_715_000 picoseconds. + Weight::from_parts(19_768_000, 0) .saturating_add(Weight::from_parts(0, 3551)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -211,8 +217,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `358` // Estimated: `3551` - // Minimum execution time: 16_375_000 picoseconds. - Weight::from_parts(17_113_000, 0) + // Minimum execution time: 20_349_000 picoseconds. + Weight::from_parts(21_050_000, 0) .saturating_add(Weight::from_parts(0, 3551)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(3)) @@ -229,8 +235,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `937` // Estimated: `4681` - // Minimum execution time: 25_952_000 picoseconds. - Weight::from_parts(27_198_000, 0) + // Minimum execution time: 31_876_000 picoseconds. + Weight::from_parts(33_536_000, 0) .saturating_add(Weight::from_parts(0, 4681)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -249,8 +255,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1003` // Estimated: `5996` - // Minimum execution time: 31_790_000 picoseconds. - Weight::from_parts(32_920_000, 0) + // Minimum execution time: 39_500_000 picoseconds. 
+ Weight::from_parts(40_666_000, 0) .saturating_add(Weight::from_parts(0, 5996)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(5)) @@ -264,13 +270,13 @@ impl pallet_broker::WeightInfo for WeightInfo { /// The range of component `m` is `[1, 3]`. fn claim_revenue(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `652` + // Measured: `671` // Estimated: `6196 + m * (2520 ±0)` - // Minimum execution time: 56_286_000 picoseconds. - Weight::from_parts(56_946_240, 0) + // Minimum execution time: 65_843_000 picoseconds. + Weight::from_parts(65_768_512, 0) .saturating_add(Weight::from_parts(0, 6196)) - // Standard Error: 44_472 - .saturating_add(Weight::from_parts(1_684_838, 0).saturating_mul(m.into())) + // Standard Error: 40_994 + .saturating_add(Weight::from_parts(2_084_877, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(T::DbWeight::get().writes(5)) @@ -290,11 +296,11 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn purchase_credit() -> Weight { // Proof Size summary in bytes: - // Measured: `322` - // Estimated: `3787` - // Minimum execution time: 64_967_000 picoseconds. - Weight::from_parts(66_504_000, 0) - .saturating_add(Weight::from_parts(0, 3787)) + // Measured: `323` + // Estimated: `3788` + // Minimum execution time: 73_250_000 picoseconds. + Weight::from_parts(75_059_000, 0) + .saturating_add(Weight::from_parts(0, 3788)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -306,8 +312,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `466` // Estimated: `3551` - // Minimum execution time: 37_552_000 picoseconds. - Weight::from_parts(46_263_000, 0) + // Minimum execution time: 55_088_000 picoseconds. + Weight::from_parts(65_329_000, 0) .saturating_add(Weight::from_parts(0, 3551)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -322,8 +328,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `463` // Estimated: `3533` - // Minimum execution time: 79_625_000 picoseconds. - Weight::from_parts(86_227_000, 0) + // Minimum execution time: 102_280_000 picoseconds. + Weight::from_parts(130_319_000, 0) .saturating_add(Weight::from_parts(0, 3533)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -338,10 +344,10 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn drop_history() -> Weight { // Proof Size summary in bytes: - // Measured: `857` + // Measured: `979` // Estimated: `3593` - // Minimum execution time: 88_005_000 picoseconds. - Weight::from_parts(92_984_000, 0) + // Minimum execution time: 78_195_000 picoseconds. + Weight::from_parts(105_946_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)) @@ -354,8 +360,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `957` // Estimated: `4698` - // Minimum execution time: 38_877_000 picoseconds. - Weight::from_parts(40_408_000, 0) + // Minimum execution time: 41_642_000 picoseconds. 
+ Weight::from_parts(48_286_000, 0) .saturating_add(Weight::from_parts(0, 4698)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -371,15 +377,13 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 1000]`. - fn request_core_count(n: u32, ) -> Weight { + fn request_core_count(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 20_581_000 picoseconds. - Weight::from_parts(21_610_297, 0) + // Minimum execution time: 23_727_000 picoseconds. + Weight::from_parts(25_029_439, 0) .saturating_add(Weight::from_parts(0, 3539)) - // Standard Error: 119 - .saturating_add(Weight::from_parts(144, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -390,11 +394,11 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `266` // Estimated: `1487` - // Minimum execution time: 6_079_000 picoseconds. - Weight::from_parts(6_540_110, 0) + // Minimum execution time: 7_887_000 picoseconds. + Weight::from_parts(8_477_863, 0) .saturating_add(Weight::from_parts(0, 1487)) - // Standard Error: 14 - .saturating_add(Weight::from_parts(10, 0).saturating_mul(n.into())) + // Standard Error: 18 + .saturating_add(Weight::from_parts(76, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -406,36 +410,50 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn process_revenue() -> Weight { // Proof Size summary in bytes: - // Measured: `442` + // Measured: `461` // Estimated: `6196` - // Minimum execution time: 42_947_000 picoseconds. - Weight::from_parts(43_767_000, 0) + // Minimum execution time: 52_505_000 picoseconds. 
+ Weight::from_parts(53_392_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) + /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::InstaPoolIo` (r:3 w:3) /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) /// Storage: `Broker::Reservations` (r:1 w:0) /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(12021), added: 12516, mode: `MaxEncodedLen`) /// Storage: `Broker::Leases` (r:1 w:1) /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(401), added: 896, mode: `MaxEncodedLen`) + /// Storage: `Broker::AutoRenewals` (r:1 w:1) + /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(1002), added: 1497, mode: `MaxEncodedLen`) + /// Storage: `Broker::Configuration` (r:1 w:0) + /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) + /// Storage: `Broker::Status` (r:1 w:0) + /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) + /// Storage: `Broker::PotentialRenewals` (r:100 w:200) + /// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:101 w:101) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Broker::SaleInfo` (r:0 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) - /// Storage: `Broker::Workplan` (r:0 w:60) + /// Storage: `Broker::Workplan` (r:0 w:1000) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. fn rotate_sale(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `12514` - // Estimated: `13506` - // Minimum execution time: 93_426_000 picoseconds. - Weight::from_parts(96_185_447, 0) - .saturating_add(Weight::from_parts(0, 13506)) - // Standard Error: 116 - .saturating_add(Weight::from_parts(4, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(65)) + // Measured: `32497` + // Estimated: `233641 + n * (198 ±9)` + // Minimum execution time: 28_834_000 picoseconds. + Weight::from_parts(2_467_159_777, 0) + .saturating_add(Weight::from_parts(0, 233641)) + // Standard Error: 149_483 + .saturating_add(Weight::from_parts(4_045_956, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(126)) + .saturating_add(T::DbWeight::get().writes(181)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 198).saturating_mul(n.into())) } /// Storage: `Broker::InstaPoolIo` (r:1 w:0) /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) @@ -445,8 +463,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `42` // Estimated: `3493` - // Minimum execution time: 5_842_000 picoseconds. - Weight::from_parts(6_077_000, 0) + // Minimum execution time: 7_689_000 picoseconds. 
+ Weight::from_parts(7_988_000, 0) .saturating_add(Weight::from_parts(0, 3493)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -469,8 +487,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1321` // Estimated: `4786` - // Minimum execution time: 33_278_000 picoseconds. - Weight::from_parts(34_076_000, 0) + // Minimum execution time: 37_394_000 picoseconds. + Weight::from_parts(38_379_000, 0) .saturating_add(Weight::from_parts(0, 4786)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -489,8 +507,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 15_779_000 picoseconds. - Weight::from_parts(16_213_000, 0) + // Minimum execution time: 19_203_000 picoseconds. + Weight::from_parts(19_797_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -501,8 +519,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_774_000 picoseconds. - Weight::from_parts(1_873_000, 0) + // Minimum execution time: 2_129_000 picoseconds. + Weight::from_parts(2_266_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -512,8 +530,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_858_000 picoseconds. - Weight::from_parts(1_991_000, 0) + // Minimum execution time: 2_233_000 picoseconds. + Weight::from_parts(2_351_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -531,8 +549,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `408` // Estimated: `1893` - // Minimum execution time: 10_874_000 picoseconds. - Weight::from_parts(11_265_000, 0) + // Minimum execution time: 15_716_000 picoseconds. + Weight::from_parts(16_160_000, 0) .saturating_add(Weight::from_parts(0, 1893)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) @@ -543,8 +561,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `470` // Estimated: `1886` - // Minimum execution time: 6_525_000 picoseconds. - Weight::from_parts(6_769_000, 0) + // Minimum execution time: 8_887_000 picoseconds. 
+		Weight::from_parts(9_178_000, 0)
 			.saturating_add(Weight::from_parts(0, 1886))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
@@ -557,36 +575,36 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
 	/// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`)
 	/// Storage: `Broker::Status` (r:1 w:0)
 	/// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`)
-	/// Storage: `System::Account` (r:1 w:1)
+	/// Storage: `System::Account` (r:2 w:2)
 	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
-	/// Storage: `Authorship::Author` (r:1 w:0)
-	/// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
-	/// Storage: `System::Digest` (r:1 w:0)
-	/// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParachainSystem::ValidationData` (r:1 w:0)
+	/// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `Broker::AutoRenewals` (r:1 w:1)
-	/// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`)
+	/// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(1002), added: 1497, mode: `MaxEncodedLen`)
 	/// Storage: `Broker::Workplan` (r:0 w:1)
 	/// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`)
 	fn enable_auto_renew() -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `914`
-		// Estimated: `4698`
-		// Minimum execution time: 51_938_000 picoseconds.
-		Weight::from_parts(55_025_000, 4698)
-			.saturating_add(T::DbWeight::get().reads(8_u64))
-			.saturating_add(T::DbWeight::get().writes(6_u64))
+		// Measured: `2829`
+		// Estimated: `6196`
+		// Minimum execution time: 130_799_000 picoseconds.
+		Weight::from_parts(139_893_000, 0)
+			.saturating_add(Weight::from_parts(0, 6196))
+			.saturating_add(T::DbWeight::get().reads(8))
+			.saturating_add(T::DbWeight::get().writes(7))
 	}
 	/// Storage: `Broker::AutoRenewals` (r:1 w:1)
-	/// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`)
+	/// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(1002), added: 1497, mode: `MaxEncodedLen`)
 	fn disable_auto_renew() -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `480`
-		// Estimated: `1516`
-		// Minimum execution time: 9_628_000 picoseconds.
-		Weight::from_parts(10_400_000, 1516)
-			.saturating_add(T::DbWeight::get().reads(1_u64))
-			.saturating_add(T::DbWeight::get().writes(1_u64))
-	}
+		// Measured: `1307`
+		// Estimated: `2487`
+		// Minimum execution time: 22_945_000 picoseconds.
+		Weight::from_parts(24_855_000, 0)
+			.saturating_add(Weight::from_parts(0, 2487))
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
 	/// Storage: `System::Account` (r:1 w:1)
 	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	/// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
@@ -601,11 +619,11 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
 	/// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	fn on_new_timeslice() -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `322`
-		// Estimated: `3787`
-		// Minimum execution time: 45_561_000 picoseconds.
-		Weight::from_parts(47_306_000, 0)
-			.saturating_add(Weight::from_parts(0, 3787))
+		// Measured: `323`
+		// Estimated: `3788`
+		// Minimum execution time: 56_864_000 picoseconds.
+		Weight::from_parts(59_119_000, 0)
+			.saturating_add(Weight::from_parts(0, 3788))
 			.saturating_add(T::DbWeight::get().reads(6))
 			.saturating_add(T::DbWeight::get().writes(3))
 	}
diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/mod.rs
index f69736e31451..dc21e2ea117f 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/mod.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/mod.rs
@@ -22,6 +22,7 @@ use alloc::vec::Vec;
 use frame_support::weights::Weight;
 use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight;
 use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric;
+use sp_runtime::BoundedVec;
 use xcm::{
 	latest::{prelude::*, AssetTransferFilter},
 	DoubleEncoded,
 };
@@ -84,7 +85,11 @@ impl<Call> XcmWeightInfo<Call> for CoretimeRococoXcmWeight<Call> {
 	fn transfer_reserve_asset(assets: &Assets, _dest: &Location, _xcm: &Xcm<()>) -> Weight {
 		assets.weigh_assets(XcmFungibleWeight::<Runtime>::transfer_reserve_asset())
 	}
-	fn transact(_origin_type: &OriginKind, _call: &DoubleEncoded<Call>) -> Weight {
+	fn transact(
+		_origin_type: &OriginKind,
+		_fallback_max_weight: &Option<Weight>,
+		_call: &DoubleEncoded<Call>,
+	) -> Weight {
 		XcmGeneric::<Runtime>::transact()
 	}
 	fn hrmp_new_channel_open_request(
@@ -251,8 +256,16 @@ impl<Call> XcmWeightInfo<Call> for CoretimeRococoXcmWeight<Call> {
 	fn unpaid_execution(_: &WeightLimit, _: &Option<Location>) -> Weight {
 		XcmGeneric::<Runtime>::unpaid_execution()
 	}
-	fn set_asset_claimer(_location: &Location) -> Weight {
-		XcmGeneric::<Runtime>::set_asset_claimer()
+	fn set_hints(hints: &BoundedVec<Hint, HintNumVariants>) -> Weight {
+		let mut weight = Weight::zero();
+		for hint in hints {
+			match hint {
+				AssetClaimer { .. } => {
+					weight = weight.saturating_add(XcmGeneric::<Runtime>::asset_claimer());
+				},
+			}
+		}
+		weight
 	}
 	fn execute_with_origin(_: &Option<InteriorLocation>, _: &Xcm<Call>) -> Weight {
 		XcmGeneric::<Runtime>::execute_with_origin()
diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs
index d207c09ffcd8..cdcba6134bf8 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs
@@ -331,7 +331,7 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Minimum execution time: 650_000 picoseconds.
 		Weight::from_parts(673_000, 0)
 	}
-	pub fn set_asset_claimer() -> Weight {
+	pub fn asset_claimer() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/tests/tests.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/tests/tests.rs
index 2cabce567b6e..89a593ab0f57 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-rococo/tests/tests.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/tests/tests.rs
@@ -16,7 +16,9 @@
 
 #![cfg(test)]
 
-use coretime_rococo_runtime::xcm_config::LocationToAccountId;
+use coretime_rococo_runtime::{
+	xcm_config::LocationToAccountId, Block, Runtime, RuntimeCall, RuntimeOrigin,
+};
 use parachains_common::AccountId;
 use sp_core::crypto::Ss58Codec;
 use xcm::latest::prelude::*;
@@ -132,3 +134,13 @@ fn location_conversion_works() {
 		assert_eq!(got, expected, "{}", tc.description);
 	}
 }
+
+#[test]
+fn xcm_payment_api_works() {
+	parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::<
+		Runtime,
+		RuntimeCall,
+		RuntimeOrigin,
+		Block,
+	>();
+}
diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml
index 149fa5d0b045..226e1c817bb8 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml
+++ b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml
@@ -5,6 +5,8 @@ authors.workspace = true
 edition.workspace = true
 description = "Westend's Coretime parachain runtime"
 license = "Apache-2.0"
+homepage.workspace = true
+repository.workspace = true
 
 [lints]
 workspace = true
@@ -80,6 +82,9 @@ parachain-info = { workspace = true }
 parachains-common = { workspace = true }
 testnet-parachains-constants = { features = ["westend"], workspace = true }
 
+[dev-dependencies]
+parachains-runtimes-test-utils = { workspace = true, default-features = true }
+
 [features]
 default = ["std"]
 std = [
@@ -172,6 +177,7 @@ runtime-benchmarks = [
 	"xcm-builder/runtime-benchmarks",
 	"xcm-executor/runtime-benchmarks",
 	"xcm-runtime-apis/runtime-benchmarks",
+	"xcm/runtime-benchmarks",
 ]
 try-runtime = [
diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/coretime.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/coretime.rs
index f0c03849750a..985e64fb76f9 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/coretime.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/coretime.rs
@@ -127,6 +127,12 @@ impl CoretimeInterface for CoretimeAllocator {
 		use crate::coretime::CoretimeProviderCalls::RequestCoreCount;
 		let request_core_count_call = RelayRuntimePallets::Coretime(RequestCoreCount(count));
 
+		// Weight for `request_core_count` from westend benchmarks:
+		// `ref_time` = 7889000 + (3 * 25000000) + (1 * 100000000) = 182889000
+		// `proof_size` = 1636
+		// Add 5% to each component and round to 2 significant figures.
+		let call_weight = Weight::from_parts(190_000_000, 1700);
+
 		let message = Xcm(vec![
 			Instruction::UnpaidExecution {
 				weight_limit: WeightLimit::Unlimited,
 			},
 			Instruction::Transact {
 				origin_kind: OriginKind::Native,
 				call: request_core_count_call.encode().into(),
+				fallback_max_weight: Some(call_weight),
 			},
 		]);
@@ -164,6 +171,7 @@ impl CoretimeInterface for CoretimeAllocator {
 			Instruction::Transact {
 				origin_kind: OriginKind::Native,
 				call: request_revenue_info_at_call.encode().into(),
+				fallback_max_weight: Some(Weight::from_parts(1_000_000_000, 200_000)),
 			},
 		]);
@@ -192,6 +200,7 @@ impl CoretimeInterface for CoretimeAllocator {
 			Instruction::Transact {
 				origin_kind: OriginKind::Native,
 				call: credit_account_call.encode().into(),
+				fallback_max_weight: Some(Weight::from_parts(1_000_000_000, 200_000)),
 			},
 		]);
@@ -216,6 +225,12 @@ impl CoretimeInterface for CoretimeAllocator {
 	) {
 		use crate::coretime::CoretimeProviderCalls::AssignCore;
+		// Weight for `assign_core` from westend benchmarks:
+		// `ref_time` = 10177115 + (1 * 25000000) + (2 * 100000000) + (57600 * 13932) = 937660315
+		// `proof_size` = 3612
+		// Add 5% to each component and round to 2 significant figures.
+		let call_weight = Weight::from_parts(980_000_000, 3800);
+
 		// The relay chain currently only allows `assign_core` to be called with a complete mask
 		// and only ever with increasing `begin`. The assignments must be truncated to avoid
 		// dropping that core's assignment completely.
@@ -256,6 +271,7 @@ impl CoretimeInterface for CoretimeAllocator {
 			Instruction::Transact {
 				origin_kind: OriginKind::Native,
 				call: assign_core_call.encode().into(),
+				fallback_max_weight: Some(call_weight),
 			},
 		]);
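The padding arithmetic in the two derivation comments above is mechanical; the sketch below (our own standalone helper, with plain `u64` standing in for each `Weight` component) reproduces the `request_core_count` fallback figures under that reading: add 5%, then round to two significant figures.

```rust
// Illustrative sketch only, not part of the patch: derive a padded fallback
// weight component the way the comments above describe.
fn pad_and_round(x: u64) -> u64 {
    let padded = x + x / 20; // add 5%
    let mut scale = 1u64;
    while padded / scale >= 100 {
        scale *= 10; // magnitude that leaves two significant figures
    }
    (padded + scale / 2) / scale * scale // round to nearest at that magnitude
}

fn main() {
    // `request_core_count`: base + 3 reads + 1 write, per the comment above.
    let ref_time = 7_889_000 + 3 * 25_000_000 + 100_000_000;
    assert_eq!(ref_time, 182_889_000);
    assert_eq!(pad_and_round(ref_time), 190_000_000); // fallback `ref_time`
    assert_eq!(pad_and_round(1_636), 1_700); // fallback `proof_size`
}
```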
diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs
index 1f0f54884fa8..67f7ad7afc6b 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs
@@ -446,6 +446,7 @@ impl pallet_multisig::Config for Runtime {
 	type DepositFactor = DepositFactor;
 	type MaxSignatories = ConstU32<100>;
 	type WeightInfo = weights::pallet_multisig::WeightInfo<Runtime>;
+	type BlockNumberProvider = frame_system::Pallet<Runtime>;
 }
 /// The type used to represent the kinds of proxying allowed.
@@ -578,6 +579,7 @@ impl pallet_proxy::Config for Runtime {
 	type CallHasher = BlakeTwo256;
 	type AnnouncementDepositBase = AnnouncementDepositBase;
 	type AnnouncementDepositFactor = AnnouncementDepositFactor;
+	type BlockNumberProvider = frame_system::Pallet<Runtime>;
 }
 impl pallet_utility::Config for Runtime {
@@ -825,7 +827,8 @@ impl_runtime_apis! {
 		}
 		fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result<u128, XcmPaymentApiError> {
-			match asset.try_as::<AssetId>() {
+			let latest_asset_id: Result<AssetId, ()> = asset.clone().try_into();
+			match latest_asset_id {
 				Ok(asset_id) if asset_id.0 == xcm_config::TokenRelayLocation::get() => {
 					// for native token
 					Ok(WeightToFee::weight_to_fee(&weight))
@@ -1123,7 +1126,9 @@ impl_runtime_apis! {
 		}
 		fn alias_origin() -> Result<(Location, Location), BenchmarkError> {
-			Err(BenchmarkError::Skip)
+			let origin = Location::new(1, [Parachain(1000)]);
+			let target = Location::new(1, [Parachain(1000), AccountId32 { id: [128u8; 32], network: None }]);
+			Ok((origin, target))
 		}
 	}
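The `query_weight_to_asset_fee` change above swaps a reference cast (`try_as`, which only matches the exact latest variant) for a version conversion (`try_into`), so callers may pass an older `VersionedAssetId` and still get a fee quote. A toy model of that distinction, using our own stand-in enum rather than the real XCM types:

```rust
// Stand-in for a versioned wrapper; the real type lives in the `xcm` crate.
enum VersionedId {
    V4(u128),
    V5(u128),
}

// `TryFrom` converts *any* supported version into the latest representation,
// which is what `asset.clone().try_into()` relies on in the hunk above.
impl TryFrom<VersionedId> for u128 {
    type Error = ();
    fn try_from(v: VersionedId) -> Result<u128, ()> {
        match v {
            VersionedId::V4(id) | VersionedId::V5(id) => Ok(id),
        }
    }
}

fn main() {
    let old = VersionedId::V4(7);
    // An exact-variant cast of `old` to V5 would fail, but conversion
    // to the latest version succeeds.
    let latest: Result<u128, ()> = old.try_into();
    assert_eq!(latest, Ok(7));
}
```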
diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs
index 74b1c4e47029..ad71691b2174 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs
@@ -17,9 +17,9 @@
 //! Autogenerated weights for `pallet_broker`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-06-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-12-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-x5tnzzy-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-acd6uxux-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-westend-dev")`, DB CACHE: 1024
 // Executed Command:
@@ -54,8 +54,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 1_899_000 picoseconds.
-		Weight::from_parts(2_051_000, 0)
+		// Minimum execution time: 2_274_000 picoseconds.
+		Weight::from_parts(2_421_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -65,8 +65,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `10888`
 		// Estimated: `13506`
-		// Minimum execution time: 21_965_000 picoseconds.
-		Weight::from_parts(22_774_000, 0)
+		// Minimum execution time: 26_257_000 picoseconds.
+		Weight::from_parts(26_802_000, 0)
 			.saturating_add(Weight::from_parts(0, 13506))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
@@ -77,8 +77,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `12090`
 		// Estimated: `13506`
-		// Minimum execution time: 20_748_000 picoseconds.
-		Weight::from_parts(21_464_000, 0)
+		// Minimum execution time: 24_692_000 picoseconds.
+		Weight::from_parts(25_275_000, 0)
 			.saturating_add(Weight::from_parts(0, 13506))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
@@ -93,8 +93,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `146`
 		// Estimated: `1631`
-		// Minimum execution time: 10_269_000 picoseconds.
-		Weight::from_parts(10_508_000, 0)
+		// Minimum execution time: 13_872_000 picoseconds.
+		Weight::from_parts(14_509_000, 0)
 			.saturating_add(Weight::from_parts(0, 1631))
 			.saturating_add(T::DbWeight::get().reads(3))
 			.saturating_add(T::DbWeight::get().writes(1))
@@ -121,6 +121,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
 	/// Proof: `ParachainSystem::LastRelayChainBlockNumber` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `Broker::InstaPoolIo` (r:3 w:3)
 	/// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`)
+	/// Storage: `Broker::AutoRenewals` (r:1 w:1)
+	/// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(201), added: 696, mode: `MaxEncodedLen`)
 	/// Storage: `Broker::SaleInfo` (r:0 w:1)
 	/// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`)
 	/// Storage: `Broker::Status` (r:0 w:1)
@@ -132,32 +134,34 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `12279`
 		// Estimated: `14805 + n * (1 ±0)`
-		// Minimum execution time: 41_900_000 picoseconds.
-		Weight::from_parts(80_392_728, 0)
+		// Minimum execution time: 52_916_000 picoseconds.
+		Weight::from_parts(96_122_236, 0)
 			.saturating_add(Weight::from_parts(0, 14805))
-			// Standard Error: 870
-			.saturating_add(Weight::from_parts(4_361, 0).saturating_mul(n.into()))
-			.saturating_add(T::DbWeight::get().reads(13))
-			.saturating_add(T::DbWeight::get().writes(26))
+			// Standard Error: 969
+			.saturating_add(Weight::from_parts(5_732, 0).saturating_mul(n.into()))
+			.saturating_add(T::DbWeight::get().reads(14))
+			.saturating_add(T::DbWeight::get().writes(27))
 			.saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into()))
 	}
 	/// Storage: `Broker::Status` (r:1 w:0)
 	/// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`)
 	/// Storage: `Broker::SaleInfo` (r:1 w:1)
 	/// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`)
-	/// Storage: `System::Account` (r:1 w:0)
+	/// Storage: `ParachainSystem::ValidationData` (r:1 w:0)
+	/// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `System::Account` (r:1 w:1)
 	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	/// Storage: `Broker::Regions` (r:0 w:1)
 	/// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`)
 	fn purchase() -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `332`
+		// Measured: `437`
 		// Estimated: `3593`
-		// Minimum execution time: 40_911_000 picoseconds.
-		Weight::from_parts(43_102_000, 0)
+		// Minimum execution time: 56_955_000 picoseconds.
+		Weight::from_parts(59_005_000, 0)
 			.saturating_add(Weight::from_parts(0, 3593))
-			.saturating_add(T::DbWeight::get().reads(3))
-			.saturating_add(T::DbWeight::get().writes(2))
+			.saturating_add(T::DbWeight::get().reads(4))
+			.saturating_add(T::DbWeight::get().writes(3))
 	}
 	/// Storage: `Broker::Configuration` (r:1 w:0)
 	/// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`)
@@ -169,16 +173,18 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
 	/// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`)
 	/// Storage: `System::Account` (r:1 w:0)
 	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	/// Storage: `ParachainSystem::ValidationData` (r:1 w:0)
+	/// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `Broker::Workplan` (r:0 w:1)
 	/// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`)
 	fn renew() -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `450`
+		// Measured: `658`
 		// Estimated: `4698`
-		// Minimum execution time: 70_257_000 picoseconds.
-		Weight::from_parts(73_889_000, 0)
+		// Minimum execution time: 108_853_000 picoseconds.
+		Weight::from_parts(117_467_000, 0)
 			.saturating_add(Weight::from_parts(0, 4698))
-			.saturating_add(T::DbWeight::get().reads(5))
+			.saturating_add(T::DbWeight::get().reads(6))
 			.saturating_add(T::DbWeight::get().writes(4))
 	}
 	/// Storage: `Broker::Regions` (r:1 w:1)
@@ -187,8 +193,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `358`
 		// Estimated: `3551`
-		// Minimum execution time: 13_302_000 picoseconds.
-		Weight::from_parts(13_852_000, 0)
+		// Minimum execution time: 16_922_000 picoseconds.
+		Weight::from_parts(17_544_000, 0)
 			.saturating_add(Weight::from_parts(0, 3551))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
@@ -199,8 +205,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `358`
 		// Estimated: `3551`
-		// Minimum execution time: 14_927_000 picoseconds.
-		Weight::from_parts(15_553_000, 0)
+		// Minimum execution time: 18_762_000 picoseconds.
+		Weight::from_parts(19_162_000, 0)
 			.saturating_add(Weight::from_parts(0, 3551))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(2))
@@ -211,8 +217,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `358`
 		// Estimated: `3551`
-		// Minimum execution time: 16_237_000 picoseconds.
-		Weight::from_parts(16_995_000, 0)
+		// Minimum execution time: 20_297_000 picoseconds.
+		Weight::from_parts(20_767_000, 0)
 			.saturating_add(Weight::from_parts(0, 3551))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(3))
@@ -229,8 +235,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `736`
 		// Estimated: `4681`
-		// Minimum execution time: 24_621_000 picoseconds.
-		Weight::from_parts(25_165_000, 0)
+		// Minimum execution time: 31_347_000 picoseconds.
+		Weight::from_parts(32_259_000, 0)
 			.saturating_add(Weight::from_parts(0, 4681))
 			.saturating_add(T::DbWeight::get().reads(4))
 			.saturating_add(T::DbWeight::get().writes(2))
@@ -249,8 +255,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `802`
 		// Estimated: `5996`
-		// Minimum execution time: 29_832_000 picoseconds.
-		Weight::from_parts(30_894_000, 0)
+		// Minimum execution time: 38_310_000 picoseconds.
+		Weight::from_parts(39_777_000, 0)
 			.saturating_add(Weight::from_parts(0, 5996))
 			.saturating_add(T::DbWeight::get().reads(5))
 			.saturating_add(T::DbWeight::get().writes(5))
@@ -264,13 +270,13 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
 	/// The range of component `m` is `[1, 3]`.
 	fn claim_revenue(m: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `652`
+		// Measured: `671`
 		// Estimated: `6196 + m * (2520 ±0)`
-		// Minimum execution time: 55_390_000 picoseconds.
-		Weight::from_parts(56_124_789, 0)
+		// Minimum execution time: 65_960_000 picoseconds.
+		Weight::from_parts(66_194_985, 0)
 			.saturating_add(Weight::from_parts(0, 6196))
-			// Standard Error: 41_724
-			.saturating_add(Weight::from_parts(1_551_266, 0).saturating_mul(m.into()))
+			// Standard Error: 42_455
+			.saturating_add(Weight::from_parts(1_808_497, 0).saturating_mul(m.into()))
 			.saturating_add(T::DbWeight::get().reads(3))
 			.saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into())))
 			.saturating_add(T::DbWeight::get().writes(5))
@@ -290,11 +296,11 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
 	/// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	fn purchase_credit() -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `320`
-		// Estimated: `3785`
-		// Minimum execution time: 59_759_000 picoseconds.
-		Weight::from_parts(61_310_000, 0)
-			.saturating_add(Weight::from_parts(0, 3785))
+		// Measured: `321`
+		// Estimated: `3786`
+		// Minimum execution time: 69_918_000 picoseconds.
+		Weight::from_parts(72_853_000, 0)
+			.saturating_add(Weight::from_parts(0, 3786))
 			.saturating_add(T::DbWeight::get().reads(6))
 			.saturating_add(T::DbWeight::get().writes(3))
 	}
@@ -306,8 +312,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `466`
 		// Estimated: `3551`
-		// Minimum execution time: 37_007_000 picoseconds.
-		Weight::from_parts(51_927_000, 0)
+		// Minimum execution time: 44_775_000 picoseconds.
+		Weight::from_parts(58_978_000, 0)
 			.saturating_add(Weight::from_parts(0, 3551))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(1))
@@ -322,8 +328,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `463`
 		// Estimated: `3533`
-		// Minimum execution time: 86_563_000 picoseconds.
-		Weight::from_parts(91_274_000, 0)
+		// Minimum execution time: 67_098_000 picoseconds.
+		Weight::from_parts(93_626_000, 0)
 			.saturating_add(Weight::from_parts(0, 3533))
 			.saturating_add(T::DbWeight::get().reads(3))
 			.saturating_add(T::DbWeight::get().writes(1))
@@ -338,10 +344,10 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
 	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	fn drop_history() -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `857`
+		// Measured: `979`
 		// Estimated: `3593`
-		// Minimum execution time: 93_655_000 picoseconds.
-		Weight::from_parts(98_160_000, 0)
+		// Minimum execution time: 89_463_000 picoseconds.
+		Weight::from_parts(113_286_000, 0)
 			.saturating_add(Weight::from_parts(0, 3593))
 			.saturating_add(T::DbWeight::get().reads(4))
 			.saturating_add(T::DbWeight::get().writes(1))
@@ -354,8 +360,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `556`
 		// Estimated: `4698`
-		// Minimum execution time: 33_985_000 picoseconds.
-		Weight::from_parts(43_618_000, 0)
+		// Minimum execution time: 42_073_000 picoseconds.
+		Weight::from_parts(52_211_000, 0)
 			.saturating_add(Weight::from_parts(0, 4698))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(1))
@@ -371,30 +377,26 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
 	/// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
 	/// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// The range of component `n` is `[0, 1000]`.
-	fn request_core_count(n: u32, ) -> Weight {
+	fn request_core_count(_n: u32, ) -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `74`
 		// Estimated: `3539`
-		// Minimum execution time: 18_778_000 picoseconds.
-		Weight::from_parts(19_543_425, 0)
+		// Minimum execution time: 22_937_000 picoseconds.
+		Weight::from_parts(23_898_154, 0)
 			.saturating_add(Weight::from_parts(0, 3539))
-			// Standard Error: 41
-			.saturating_add(Weight::from_parts(33, 0).saturating_mul(n.into()))
 			.saturating_add(T::DbWeight::get().reads(5))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
 	/// Storage: `Broker::CoreCountInbox` (r:1 w:1)
 	/// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`)
 	/// The range of component `n` is `[0, 1000]`.
-	fn process_core_count(n: u32, ) -> Weight {
+	fn process_core_count(_n: u32, ) -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `266`
 		// Estimated: `1487`
-		// Minimum execution time: 5_505_000 picoseconds.
-		Weight::from_parts(5_982_015, 0)
+		// Minimum execution time: 7_650_000 picoseconds.
+		Weight::from_parts(8_166_809, 0)
 			.saturating_add(Weight::from_parts(0, 1487))
-			// Standard Error: 13
-			.saturating_add(Weight::from_parts(44, 0).saturating_mul(n.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -402,40 +404,54 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
 	/// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`)
 	/// Storage: `Broker::InstaPoolHistory` (r:1 w:1)
 	/// Proof: `Broker::InstaPoolHistory` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`)
-	/// Storage: `System::Account` (r:2 w:1)
+	/// Storage: `System::Account` (r:2 w:2)
 	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	fn process_revenue() -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `442`
+		// Measured: `461`
 		// Estimated: `6196`
-		// Minimum execution time: 38_128_000 picoseconds.
-		Weight::from_parts(40_979_000, 0)
+		// Minimum execution time: 53_023_000 picoseconds.
+		Weight::from_parts(54_564_000, 0)
 			.saturating_add(Weight::from_parts(0, 6196))
 			.saturating_add(T::DbWeight::get().reads(4))
-			.saturating_add(T::DbWeight::get().writes(3))
+			.saturating_add(T::DbWeight::get().writes(4))
 	}
+	/// Storage: `ParachainSystem::ValidationData` (r:1 w:0)
+	/// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `Broker::InstaPoolIo` (r:3 w:3)
 	/// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`)
 	/// Storage: `Broker::Reservations` (r:1 w:0)
 	/// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(12021), added: 12516, mode: `MaxEncodedLen`)
 	/// Storage: `Broker::Leases` (r:1 w:1)
 	/// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(81), added: 576, mode: `MaxEncodedLen`)
+	/// Storage: `Broker::AutoRenewals` (r:1 w:1)
+	/// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(201), added: 696, mode: `MaxEncodedLen`)
+	/// Storage: `Broker::Configuration` (r:1 w:0)
+	/// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`)
+	/// Storage: `Broker::Status` (r:1 w:0)
+	/// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`)
+	/// Storage: `Broker::PotentialRenewals` (r:20 w:40)
+	/// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`)
+	/// Storage: `System::Account` (r:21 w:20)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	/// Storage: `Broker::SaleInfo` (r:0 w:1)
 	/// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`)
-	/// Storage: `Broker::Workplan` (r:0 w:20)
+	/// Storage: `Broker::Workplan` (r:0 w:1000)
 	/// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`)
 	/// The range of component `n` is `[0, 1000]`.
 	fn rotate_sale(n: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `12194`
-		// Estimated: `13506`
-		// Minimum execution time: 49_041_000 picoseconds.
-		Weight::from_parts(50_522_788, 0)
-			.saturating_add(Weight::from_parts(0, 13506))
-			// Standard Error: 72
-			.saturating_add(Weight::from_parts(78, 0).saturating_mul(n.into()))
-			.saturating_add(T::DbWeight::get().reads(5))
-			.saturating_add(T::DbWeight::get().writes(25))
+		// Measured: `16480`
+		// Estimated: `69404 + n * (8 ±1)`
+		// Minimum execution time: 29_313_000 picoseconds.
+		Weight::from_parts(746_062_644, 0)
+			.saturating_add(Weight::from_parts(0, 69404))
+			// Standard Error: 22_496
+			.saturating_add(Weight::from_parts(1_545_204, 0).saturating_mul(n.into()))
+			.saturating_add(T::DbWeight::get().reads(44))
+			.saturating_add(T::DbWeight::get().writes(57))
+			.saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into())))
+			.saturating_add(Weight::from_parts(0, 8).saturating_mul(n.into()))
 	}
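Read as a formula, the regenerated `rotate_sale` entry above is a base cost, a per-`n` slope, flat read/write charges, and one extra write per `n`. A sketch (ours, not generated code) that evaluates it, assuming the RocksDB-style per-operation costs of 25_000_000 ps per read and 100_000_000 ps per write used in the derivation comments elsewhere in this patch:

```rust
// Illustrative evaluation of the rotate_sale weight formula above.
fn rotate_sale_weight(n: u64) -> (u64, u64) {
    const READ: u64 = 25_000_000; // assumed DbWeight read cost (ps)
    const WRITE: u64 = 100_000_000; // assumed DbWeight write cost (ps)
    let ref_time = 746_062_644 + 1_545_204 * n + 44 * READ + (57 + n) * WRITE;
    let proof_size = 69_404 + 8 * n; // from `69404 + n * (8 ±1)` in the summary
    (ref_time, proof_size)
}

fn main() {
    let (ref_time, proof_size) = rotate_sale_weight(80); // e.g. 80 workplan entries
    println!("rotate_sale(80) ~ {ref_time} ps, {proof_size} proof bytes");
}
```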
 	/// Storage: `Broker::InstaPoolIo` (r:1 w:0)
 	/// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`)
@@ -445,8 +461,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `42`
 		// Estimated: `3493`
-		// Minimum execution time: 5_903_000 picoseconds.
-		Weight::from_parts(6_202_000, 0)
+		// Minimum execution time: 7_625_000 picoseconds.
+		Weight::from_parts(7_910_000, 0)
 			.saturating_add(Weight::from_parts(0, 3493))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
@@ -469,8 +485,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `1321`
 		// Estimated: `4786`
-		// Minimum execution time: 31_412_000 picoseconds.
-		Weight::from_parts(31_964_000, 0)
+		// Minimum execution time: 36_572_000 picoseconds.
+		Weight::from_parts(37_316_000, 0)
 			.saturating_add(Weight::from_parts(0, 4786))
 			.saturating_add(T::DbWeight::get().reads(7))
 			.saturating_add(T::DbWeight::get().writes(4))
@@ -489,8 +505,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `74`
 		// Estimated: `3539`
-		// Minimum execution time: 14_098_000 picoseconds.
-		Weight::from_parts(14_554_000, 0)
+		// Minimum execution time: 18_362_000 picoseconds.
+		Weight::from_parts(18_653_000, 0)
 			.saturating_add(Weight::from_parts(0, 3539))
 			.saturating_add(T::DbWeight::get().reads(5))
 			.saturating_add(T::DbWeight::get().writes(2))
@@ -501,8 +517,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 1_723_000 picoseconds.
-		Weight::from_parts(1_822_000, 0)
+		// Minimum execution time: 2_193_000 picoseconds.
+		Weight::from_parts(2_393_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -512,8 +528,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 1_865_000 picoseconds.
-		Weight::from_parts(1_983_000, 0)
+		// Minimum execution time: 2_344_000 picoseconds.
+		Weight::from_parts(2_486_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -531,8 +547,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `408`
 		// Estimated: `1893`
-		// Minimum execution time: 10_387_000 picoseconds.
-		Weight::from_parts(10_819_000, 0)
+		// Minimum execution time: 15_443_000 picoseconds.
+		Weight::from_parts(15_753_000, 0)
 			.saturating_add(Weight::from_parts(0, 1893))
 			.saturating_add(T::DbWeight::get().reads(5))
 			.saturating_add(T::DbWeight::get().writes(1))
@@ -543,8 +559,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `150`
 		// Estimated: `1566`
-		// Minimum execution time: 5_996_000 picoseconds.
-		Weight::from_parts(6_278_000, 0)
+		// Minimum execution time: 8_637_000 picoseconds.
+		Weight::from_parts(8_883_000, 0)
 			.saturating_add(Weight::from_parts(0, 1566))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
@@ -557,44 +573,44 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
 	/// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`)
 	/// Storage: `Broker::Status` (r:1 w:0)
 	/// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`)
-	/// Storage: `System::Account` (r:1 w:1)
+	/// Storage: `System::Account` (r:2 w:1)
 	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
-	/// Storage: `Authorship::Author` (r:1 w:0)
-	/// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
-	/// Storage: `System::Digest` (r:1 w:0)
-	/// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParachainSystem::ValidationData` (r:1 w:0)
+	/// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `Broker::AutoRenewals` (r:1 w:1)
-	/// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`)
+	/// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(201), added: 696, mode: `MaxEncodedLen`)
 	/// Storage: `Broker::Workplan` (r:0 w:1)
 	/// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`)
 	fn enable_auto_renew() -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `914`
-		// Estimated: `4698`
-		// Minimum execution time: 51_938_000 picoseconds.
-		Weight::from_parts(55_025_000, 4698)
-			.saturating_add(T::DbWeight::get().reads(8_u64))
-			.saturating_add(T::DbWeight::get().writes(6_u64))
+		// Measured: `1451`
+		// Estimated: `6196`
+		// Minimum execution time: 120_585_000 picoseconds.
+		Weight::from_parts(148_755_000, 0)
+			.saturating_add(Weight::from_parts(0, 6196))
+			.saturating_add(T::DbWeight::get().reads(8))
+			.saturating_add(T::DbWeight::get().writes(6))
 	}
 	/// Storage: `Broker::AutoRenewals` (r:1 w:1)
-	/// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`)
+	/// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(201), added: 696, mode: `MaxEncodedLen`)
 	fn disable_auto_renew() -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `480`
-		// Estimated: `1516`
-		// Minimum execution time: 9_628_000 picoseconds.
-		Weight::from_parts(10_400_000, 1516)
-			.saturating_add(T::DbWeight::get().reads(1_u64))
-			.saturating_add(T::DbWeight::get().writes(1_u64))
-	}
+		// Measured: `506`
+		// Estimated: `1686`
+		// Minimum execution time: 18_235_000 picoseconds.
+		Weight::from_parts(19_113_000, 0)
+			.saturating_add(Weight::from_parts(0, 1686))
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
 	/// Storage: `System::Account` (r:1 w:0)
 	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	fn on_new_timeslice() -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `0`
+		// Measured: `103`
 		// Estimated: `3593`
-		// Minimum execution time: 2_187_000 picoseconds.
-		Weight::from_parts(2_372_000, 0)
+		// Minimum execution time: 4_863_000 picoseconds.
+		Weight::from_parts(5_045_000, 0)
 			.saturating_add(Weight::from_parts(0, 3593))
 			.saturating_add(T::DbWeight::get().reads(1))
 	}
diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/mod.rs
index 1640baa38c99..2f7529481543 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/mod.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/mod.rs
@@ -21,6 +21,7 @@ use alloc::vec::Vec;
 use frame_support::weights::Weight;
 use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight;
 use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric;
+use sp_runtime::BoundedVec;
 use xcm::{
 	latest::{prelude::*, AssetTransferFilter},
 	DoubleEncoded,
 };
@@ -83,7 +84,11 @@ impl<Call> XcmWeightInfo<Call> for CoretimeWestendXcmWeight<Call> {
 	fn transfer_reserve_asset(assets: &Assets, _dest: &Location, _xcm: &Xcm<()>) -> Weight {
 		assets.weigh_assets(XcmFungibleWeight::<Runtime>::transfer_reserve_asset())
 	}
-	fn transact(_origin_type: &OriginKind, _call: &DoubleEncoded<Call>) -> Weight {
+	fn transact(
+		_origin_type: &OriginKind,
+		_fallback_max_weight: &Option<Weight>,
+		_call: &DoubleEncoded<Call>,
+	) -> Weight {
 		XcmGeneric::<Runtime>::transact()
 	}
 	fn hrmp_new_channel_open_request(
@@ -172,8 +177,16 @@ impl<Call> XcmWeightInfo<Call> for CoretimeWestendXcmWeight<Call> {
 	fn clear_error() -> Weight {
 		XcmGeneric::<Runtime>::clear_error()
 	}
-	fn set_asset_claimer(_location: &Location) -> Weight {
-		XcmGeneric::<Runtime>::set_asset_claimer()
+	fn set_hints(hints: &BoundedVec<Hint, HintNumVariants>) -> Weight {
+		let mut weight = Weight::zero();
+		for hint in hints {
+			match hint {
+				AssetClaimer { .. } => {
+					weight = weight.saturating_add(XcmGeneric::<Runtime>::asset_claimer());
+				},
+			}
+		}
+		weight
 	}
 	fn claim_asset(_assets: &Assets, _ticket: &Location) -> Weight {
 		XcmGeneric::<Runtime>::claim_asset()
 	}
@@ -248,8 +261,7 @@ impl<Call> XcmWeightInfo<Call> for CoretimeWestendXcmWeight<Call> {
 		XcmGeneric::<Runtime>::clear_topic()
 	}
 	fn alias_origin(_: &Location) -> Weight {
-		// XCM Executor does not currently support alias origin operations
-		Weight::MAX
+		XcmGeneric::<Runtime>::alias_origin()
 	}
 	fn unpaid_execution(_: &WeightLimit, _: &Option<Location>) -> Weight {
 		XcmGeneric::<Runtime>::unpaid_execution()
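`set_hints` above is weighed by summing a benchmarked cost for each hint actually supplied, with an exhaustive match so that any future `Hint` variant forces an explicit weighing decision. A self-contained sketch of that pattern (our own stand-in types; the cost constant is the `asset_claimer` figure from the regenerated weights file below):

```rust
// Stand-in for xcm's Hint enum; today it has a single variant.
enum Hint {
    AssetClaimer,
}

fn set_hints_weight(hints: &[Hint]) -> u64 {
    let mut weight: u64 = 0;
    for hint in hints {
        match hint {
            // Exhaustive: adding a Hint variant breaks this until it is weighed.
            Hint::AssetClaimer => weight = weight.saturating_add(702_000),
        }
    }
    weight
}

fn main() {
    assert_eq!(set_hints_weight(&[Hint::AssetClaimer]), 702_000);
    assert_eq!(set_hints_weight(&[]), 0); // no hints, no cost
}
```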
diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs
index fb6e4631736d..2d10ac16ea26 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs
@@ -17,26 +17,28 @@
 //! Autogenerated weights for `pallet_xcm_benchmarks::generic`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-08-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-12-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-svzsllib-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `9340d096ec0f`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: Compiled, CHAIN: Some("coretime-westend-dev"), DB CACHE: 1024
 // Executed Command:
 // target/production/polkadot-parachain
 // benchmark
 // pallet
-// --steps=50
-// --repeat=20
 // --extrinsic=*
+// --chain=coretime-westend-dev
+// --pallet=pallet_xcm_benchmarks::generic
+// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm
 // --wasm-execution=compiled
+// --steps=50
+// --repeat=20
 // --heap-pages=4096
-// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
-// --pallet=pallet_xcm_benchmarks::generic
-// --chain=coretime-westend-dev
-// --header=./cumulus/file_header.txt
-// --template=./cumulus/templates/xcm-bench-template.hbs
-// --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/
+// --template=cumulus/templates/xcm-bench-template.hbs
+// --no-storage-info
+// --no-min-squares
+// --no-median-slopes
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -64,8 +66,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `106`
 		// Estimated: `3571`
-		// Minimum execution time: 29_463_000 picoseconds.
-		Weight::from_parts(30_178_000, 3571)
+		// Minimum execution time: 30_717_000 picoseconds.
+		Weight::from_parts(31_651_000, 3571)
 			.saturating_add(T::DbWeight::get().reads(6))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -73,15 +75,26 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 568_000 picoseconds.
-		Weight::from_parts(608_000, 0)
+		// Minimum execution time: 618_000 picoseconds.
+		Weight::from_parts(659_000, 0)
 	}
+	// Storage: `System::Account` (r:1 w:1)
+	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	pub fn pay_fees() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `0`
+		// Estimated: `3593`
+		// Minimum execution time: 3_504_000 picoseconds.
+		Weight::from_parts(3_757_000, 3593)
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	pub fn asset_claimer() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 1_530_000 picoseconds.
-		Weight::from_parts(1_585_000, 0)
+		// Minimum execution time: 643_000 picoseconds.
+		Weight::from_parts(702_000, 0)
 	}
 	// Storage: `PolkadotXcm::Queries` (r:1 w:0)
 	// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`)
@@ -89,58 +102,65 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `32`
 		// Estimated: `3497`
-		// Minimum execution time: 7_400_000 picoseconds.
-		Weight::from_parts(7_572_000, 3497)
+		// Minimum execution time: 7_799_000 picoseconds.
+		Weight::from_parts(8_037_000, 3497)
 			.saturating_add(T::DbWeight::get().reads(1))
 	}
 	pub fn transact() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 6_951_000 picoseconds.
-		Weight::from_parts(7_173_000, 0)
+		// Minimum execution time: 6_910_000 picoseconds.
+		Weight::from_parts(7_086_000, 0)
 	}
 	pub fn refund_surplus() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 1_245_000 picoseconds.
-		Weight::from_parts(1_342_000, 0)
+		// Minimum execution time: 1_257_000 picoseconds.
+		Weight::from_parts(1_384_000, 0)
 	}
 	pub fn set_error_handler() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 613_000 picoseconds.
-		Weight::from_parts(657_000, 0)
+		// Minimum execution time: 634_000 picoseconds.
+		Weight::from_parts(687_000, 0)
 	}
 	pub fn set_appendix() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 613_000 picoseconds.
-		Weight::from_parts(656_000, 0)
+		// Minimum execution time: 604_000 picoseconds.
+		Weight::from_parts(672_000, 0)
 	}
 	pub fn clear_error() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 570_000 picoseconds.
-		Weight::from_parts(608_000, 0)
+		// Minimum execution time: 593_000 picoseconds.
+		Weight::from_parts(643_000, 0)
 	}
 	pub fn descend_origin() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 557_000 picoseconds.
-		Weight::from_parts(607_000, 0)
+		// Minimum execution time: 630_000 picoseconds.
+		Weight::from_parts(694_000, 0)
+	}
+	pub fn execute_with_origin() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `0`
+		// Estimated: `0`
+		// Minimum execution time: 706_000 picoseconds.
+		Weight::from_parts(764_000, 0)
 	}
 	pub fn clear_origin() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 557_000 picoseconds.
-		Weight::from_parts(578_000, 0)
+		// Minimum execution time: 606_000 picoseconds.
+		Weight::from_parts(705_000, 0)
 	}
 	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
 	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
@@ -158,8 +178,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `106`
 		// Estimated: `3571`
-		// Minimum execution time: 26_179_000 picoseconds.
-		Weight::from_parts(27_089_000, 3571)
+		// Minimum execution time: 27_188_000 picoseconds.
+		Weight::from_parts(27_847_000, 3571)
 			.saturating_add(T::DbWeight::get().reads(6))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -169,8 +189,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `90`
 		// Estimated: `3555`
-		// Minimum execution time: 10_724_000 picoseconds.
-		Weight::from_parts(10_896_000, 3555)
+		// Minimum execution time: 11_170_000 picoseconds.
+		Weight::from_parts(11_416_000, 3555)
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -178,8 +198,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 567_000 picoseconds.
-		Weight::from_parts(623_000, 0)
+		// Minimum execution time: 590_000 picoseconds.
+		Weight::from_parts(653_000, 0)
 	}
 	// Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1)
 	// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`)
@@ -197,8 +217,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `74`
 		// Estimated: `3539`
-		// Minimum execution time: 24_367_000 picoseconds.
-		Weight::from_parts(25_072_000, 3539)
+		// Minimum execution time: 25_196_000 picoseconds.
+		Weight::from_parts(25_641_000, 3539)
 			.saturating_add(T::DbWeight::get().reads(6))
 			.saturating_add(T::DbWeight::get().writes(3))
 	}
@@ -208,44 +228,44 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 2_554_000 picoseconds.
-		Weight::from_parts(2_757_000, 0)
+		// Minimum execution time: 2_686_000 picoseconds.
+		Weight::from_parts(2_827_000, 0)
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
 	pub fn burn_asset() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 922_000 picoseconds.
-		Weight::from_parts(992_000, 0)
+		// Minimum execution time: 989_000 picoseconds.
+		Weight::from_parts(1_051_000, 0)
 	}
 	pub fn expect_asset() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 688_000 picoseconds.
-		Weight::from_parts(723_000, 0)
+		// Minimum execution time: 713_000 picoseconds.
+		Weight::from_parts(766_000, 0)
 	}
 	pub fn expect_origin() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 607_000 picoseconds.
-		Weight::from_parts(647_000, 0)
+		// Minimum execution time: 626_000 picoseconds.
+		Weight::from_parts(657_000, 0)
 	}
 	pub fn expect_error() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 591_000 picoseconds.
-		Weight::from_parts(620_000, 0)
+		// Minimum execution time: 595_000 picoseconds.
+		Weight::from_parts(639_000, 0)
 	}
 	pub fn expect_transact_status() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 735_000 picoseconds.
-		Weight::from_parts(802_000, 0)
+		// Minimum execution time: 755_000 picoseconds.
+		Weight::from_parts(820_000, 0)
 	}
 	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
 	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
@@ -263,8 +283,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `106`
 		// Estimated: `3571`
-		// Minimum execution time: 29_923_000 picoseconds.
-		Weight::from_parts(30_770_000, 3571)
+		// Minimum execution time: 31_409_000 picoseconds.
+		Weight::from_parts(32_098_000, 3571)
 			.saturating_add(T::DbWeight::get().reads(6))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -272,8 +292,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 2_884_000 picoseconds.
-		Weight::from_parts(3_088_000, 0)
+		// Minimum execution time: 3_258_000 picoseconds.
+		Weight::from_parts(3_448_000, 0)
 	}
 	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
 	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
@@ -291,8 +311,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `106`
 		// Estimated: `3571`
-		// Minimum execution time: 26_632_000 picoseconds.
-		Weight::from_parts(27_228_000, 3571)
+		// Minimum execution time: 27_200_000 picoseconds.
+		Weight::from_parts(28_299_000, 3571)
 			.saturating_add(T::DbWeight::get().reads(6))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -300,49 +320,42 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 599_000 picoseconds.
-		Weight::from_parts(655_000, 0)
+		// Minimum execution time: 659_000 picoseconds.
+		Weight::from_parts(699_000, 0)
 	}
 	pub fn set_topic() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 587_000 picoseconds.
-		Weight::from_parts(628_000, 0)
+		// Minimum execution time: 595_000 picoseconds.
+		Weight::from_parts(647_000, 0)
 	}
 	pub fn clear_topic() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 572_000 picoseconds.
-		Weight::from_parts(631_000, 0)
+		// Minimum execution time: 583_000 picoseconds.
+		Weight::from_parts(617_000, 0)
 	}
 	pub fn set_fees_mode() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 570_000 picoseconds.
-		Weight::from_parts(615_000, 0)
+		// Minimum execution time: 595_000 picoseconds.
+		Weight::from_parts(633_000, 0)
 	}
 	pub fn unpaid_execution() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 624_000 picoseconds.
-		Weight::from_parts(659_000, 0)
+		// Minimum execution time: 610_000 picoseconds.
+		Weight::from_parts(670_000, 0)
 	}
-	pub fn set_asset_claimer() -> Weight {
+	pub fn alias_origin() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 707_000 picoseconds.
-		Weight::from_parts(749_000, 0)
-	}
-	pub fn execute_with_origin() -> Weight {
-		// Proof Size summary in bytes:
-		// Measured: `0`
-		// Estimated: `0`
-		// Minimum execution time: 713_000 picoseconds.
-		Weight::from_parts(776_000, 0)
+		// Minimum execution time: 630_000 picoseconds.
+		Weight::from_parts(700_000, 0)
 	}
 }
diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs
index 9f38975efae6..8a4879a1506e 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs
@@ -39,7 +39,8 @@ use polkadot_runtime_common::xcm_sender::ExponentialPrice;
 use sp_runtime::traits::AccountIdConversion;
 use xcm::latest::{prelude::*, WESTEND_GENESIS_HASH};
 use xcm_builder::{
-	AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain,
+	AccountId32Aliases, AliasChildLocation, AliasOriginRootUsingFilter,
+	AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain,
 	AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom,
 	DenyReserveTransferToRelayChain, DenyThenTry, DescribeAllTerminal, DescribeFamily,
 	EnsureXcmOrigin, FrameTransactionalProcessor, FungibleAdapter, HashedDescription, IsConcrete,
@@ -54,6 +55,7 @@ use xcm_executor::XcmExecutor;
 parameter_types! {
 	pub const RootLocation: Location = Location::here();
 	pub const TokenRelayLocation: Location = Location::parent();
+	pub AssetHubLocation: Location = Location::new(1, [Parachain(1000)]);
 	pub const RelayNetwork: Option<NetworkId> = Some(NetworkId::ByGenesis(WESTEND_GENESIS_HASH));
 	pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into();
 	pub UniversalLocation: InteriorLocation =
@@ -191,6 +193,10 @@ pub type WaivedLocations = (
 	Equals<RootLocation>,
 );
+/// We allow locations to alias into their own child locations, as well as
+/// AssetHub to alias into anything.
+pub type Aliasers = (AliasChildLocation, AliasOriginRootUsingFilter<AssetHubLocation, Everything>);
+
 pub struct XcmConfig;
 impl xcm_executor::Config for XcmConfig {
 	type RuntimeCall = RuntimeCall;
@@ -232,7 +238,7 @@ impl xcm_executor::Config for XcmConfig {
 	type UniversalAliases = Nothing;
 	type CallDispatcher = RuntimeCall;
 	type SafeCallFilter = Everything;
-	type Aliasers = Nothing;
+	type Aliasers = Aliasers;
 	type TransactionalProcessor = FrameTransactionalProcessor;
 	type HrmpNewChannelOpenRequestHandler = ();
 	type HrmpChannelAcceptedHandler = ();
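The new `Aliasers` tuple admits an origin aliasing into one of its own child locations, plus (via `AliasOriginRootUsingFilter`) Asset Hub aliasing into anything. A simplified sketch (our own types, not `xcm-builder`'s actual filters) of the child-location rule, checked against the benchmark pair added in `lib.rs` above:

```rust
struct Loc {
    parents: u8,
    interior: Vec<&'static str>, // stand-in for XCM junctions
}

// AliasChildLocation-style rule: target must extend origin's interior.
fn alias_child_allowed(origin: &Loc, target: &Loc) -> bool {
    origin.parents == target.parents
        && target.interior.len() > origin.interior.len()
        && target.interior[..origin.interior.len()] == origin.interior[..]
}

fn main() {
    // (1, [Parachain(1000)]) may alias into
    // (1, [Parachain(1000), AccountId32 { .. }]), but not the reverse.
    let origin = Loc { parents: 1, interior: vec!["Parachain(1000)"] };
    let target =
        Loc { parents: 1, interior: vec!["Parachain(1000)", "AccountId32(0x80..)"] };
    assert!(alias_child_allowed(&origin, &target));
    assert!(!alias_child_allowed(&target, &origin));
}
```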
diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/tests/tests.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/tests/tests.rs
index e391d71a9ab7..976ce23d6e87 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-westend/tests/tests.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-westend/tests/tests.rs
@@ -16,7 +16,9 @@
 
 #![cfg(test)]
 
-use coretime_westend_runtime::xcm_config::LocationToAccountId;
+use coretime_westend_runtime::{
+	xcm_config::LocationToAccountId, Block, Runtime, RuntimeCall, RuntimeOrigin,
+};
 use parachains_common::AccountId;
 use sp_core::crypto::Ss58Codec;
 use xcm::latest::prelude::*;
@@ -132,3 +134,13 @@ fn location_conversion_works() {
 		assert_eq!(got, expected, "{}", tc.description);
 	}
 }
+
+#[test]
+fn xcm_payment_api_works() {
+	parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::<
+		Runtime,
+		RuntimeCall,
+		RuntimeOrigin,
+		Block,
+	>();
+}
diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml
index 09b4ef679d24..f2922b710e24 100644
--- a/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml
+++ b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml
@@ -5,6 +5,8 @@ authors.workspace = true
 edition.workspace = true
 license = "Apache-2.0"
 description = "Glutton parachain runtime."
+homepage.workspace = true
+repository.workspace = true
 
 [lints]
 workspace = true
@@ -75,6 +77,7 @@ runtime-benchmarks = [
 	"sp-runtime/runtime-benchmarks",
 	"xcm-builder/runtime-benchmarks",
 	"xcm-executor/runtime-benchmarks",
+	"xcm/runtime-benchmarks",
 ]
 std = [
 	"codec/std",
diff --git a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml
index 34458c2352fb..4984f6314f87 100644
--- a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml
+++ b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml
@@ -5,6 +5,8 @@ authors.workspace = true
 edition.workspace = true
 description = "Rococo's People parachain runtime"
 license = "Apache-2.0"
+homepage.workspace = true
+repository.workspace = true
 
 [build-dependencies]
 substrate-wasm-builder = { optional = true, workspace = true, default-features = true }
@@ -77,6 +79,9 @@ parachain-info = { workspace = true }
 parachains-common = { workspace = true }
 testnet-parachains-constants = { features = ["rococo"], workspace = true }
 
+[dev-dependencies]
+parachains-runtimes-test-utils = { workspace = true, default-features = true }
+
 [features]
 default = ["std"]
 std = [
@@ -171,6 +176,7 @@ runtime-benchmarks = [
 	"xcm-builder/runtime-benchmarks",
 	"xcm-executor/runtime-benchmarks",
 	"xcm-runtime-apis/runtime-benchmarks",
+	"xcm/runtime-benchmarks",
 ]
 try-runtime = [
diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs
index 25356a84806d..dc5f2ac0997c 100644
--- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs
@@ -407,6 +407,7 @@ impl pallet_multisig::Config for Runtime {
 	type DepositFactor = DepositFactor;
 	type MaxSignatories = ConstU32<100>;
 	type WeightInfo = weights::pallet_multisig::WeightInfo<Runtime>;
+	type BlockNumberProvider = frame_system::Pallet<Runtime>;
 }
 /// The type used to represent the kinds of proxying allowed.
@@ -520,6 +521,7 @@ impl pallet_proxy::Config for Runtime {
 	type CallHasher = BlakeTwo256;
 	type AnnouncementDepositBase = AnnouncementDepositBase;
 	type AnnouncementDepositFactor = AnnouncementDepositFactor;
+	type BlockNumberProvider = frame_system::Pallet<Runtime>;
 }
 impl pallet_utility::Config for Runtime {
@@ -781,7 +783,8 @@ impl_runtime_apis! {
{ } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result<u128, XcmPaymentApiError> { - match asset.try_as::<AssetId>() { + let latest_asset_id: Result<AssetId, ()> = asset.clone().try_into(); + match latest_asset_id { Ok(asset_id) if asset_id.0 == xcm_config::RelayLocation::get() => { // for native token Ok(WeightToFee::weight_to_fee(&weight)) diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/mod.rs index 631cc7b7f0b0..d55198f60a00 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/mod.rs @@ -21,6 +21,7 @@ use alloc::vec::Vec; use frame_support::weights::Weight; use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight; use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; +use sp_runtime::BoundedVec; use xcm::{ latest::{prelude::*, AssetTransferFilter}, DoubleEncoded, @@ -83,7 +84,11 @@ impl<Call> XcmWeightInfo<Call> for PeopleRococoXcmWeight<Call> { fn transfer_reserve_asset(assets: &Assets, _dest: &Location, _xcm: &Xcm<()>) -> Weight { assets.weigh_assets(XcmFungibleWeight::<Runtime>::transfer_reserve_asset()) } - fn transact(_origin_type: &OriginKind, _call: &DoubleEncoded<Call>) -> Weight { + fn transact( + _origin_type: &OriginKind, + _fallback_max_weight: &Option<Weight>, + _call: &DoubleEncoded<Call>, + ) -> Weight { XcmGeneric::<Runtime>::transact() } fn hrmp_new_channel_open_request( @@ -250,8 +255,16 @@ impl<Call> XcmWeightInfo<Call> for PeopleRococoXcmWeight<Call> { fn unpaid_execution(_: &WeightLimit, _: &Option<Location>) -> Weight { XcmGeneric::<Runtime>::unpaid_execution() } - fn set_asset_claimer(_location: &Location) -> Weight { - XcmGeneric::<Runtime>::set_asset_claimer() + fn set_hints(hints: &BoundedVec<Hint, HintNumVariants>) -> Weight { + let mut weight = Weight::zero(); + for hint in hints { + match hint { + AssetClaimer { .. } => { + weight = weight.saturating_add(XcmGeneric::<Runtime>::asset_claimer()); + }, + } + } + weight } fn execute_with_origin(_: &Option<InteriorLocation>, _: &Xcm<Call>) -> Weight { XcmGeneric::<Runtime>::execute_with_origin() diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 6aac6119e7ec..caa916507348 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -331,7 +331,7 @@ impl<T: frame_system::Config> WeightInfo<T> { // Minimum execution time: 685_000 picoseconds.
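(The autogenerated weight entry above continues just below.) Worth spelling out the `query_weight_to_asset_fee` pattern from the top of this hunk: instead of `try_as`, which only matches when the stored encoding is already the latest version, the asset is now upgraded with `try_into`, so older-version encodings are accepted too. A self-contained sketch of the same pattern (simplified; not the runtime's exact code):

```rust
// Sketch: upgrade a VersionedAssetId to the latest AssetId before matching.
// The xcm crate's version conversions use `()` as their error type.
use xcm::{latest::prelude::*, VersionedAssetId};

fn is_native_token(asset: &VersionedAssetId) -> bool {
    let latest: Result<AssetId, ()> = asset.clone().try_into();
    // RelayLocation in these runtimes is the parent location.
    matches!(latest, Ok(id) if id.0 == Location::parent())
}
```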
Weight::from_parts(757_000, 0) } - pub fn set_asset_claimer() -> Weight { + pub fn asset_claimer() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` diff --git a/cumulus/parachains/runtimes/people/people-rococo/tests/tests.rs b/cumulus/parachains/runtimes/people/people-rococo/tests/tests.rs index 3627d9c40ec2..00fe7781822a 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/tests/tests.rs @@ -17,7 +17,9 @@ #![cfg(test)] use parachains_common::AccountId; -use people_rococo_runtime::xcm_config::LocationToAccountId; +use people_rococo_runtime::{ + xcm_config::LocationToAccountId, Block, Runtime, RuntimeCall, RuntimeOrigin, +}; use sp_core::crypto::Ss58Codec; use xcm::latest::prelude::*; use xcm_runtime_apis::conversions::LocationToAccountHelper; @@ -132,3 +134,13 @@ fn location_conversion_works() { assert_eq!(got, expected, "{}", tc.description); } } + +#[test] +fn xcm_payment_api_works() { + parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< + Runtime, + RuntimeCall, + RuntimeOrigin, + Block, + >(); +} diff --git a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml index 6840b97d8c3f..7822df585a58 100644 --- a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Westend's People parachain runtime" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [build-dependencies] substrate-wasm-builder = { optional = true, workspace = true, default-features = true } @@ -77,6 +79,9 @@ parachain-info = { workspace = true } parachains-common = { workspace = true } testnet-parachains-constants = { features = ["westend"], workspace = true } +[dev-dependencies] +parachains-runtimes-test-utils = { workspace = true, default-features = true } + [features] default = ["std"] std = [ @@ -171,6 +176,7 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index 1c5183636c49..3265062a0441 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -406,6 +406,7 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = ConstU32<100>; type WeightInfo = weights::pallet_multisig::WeightInfo<Runtime>; + type BlockNumberProvider = frame_system::Pallet<Runtime>; } /// The type used to represent the kinds of proxying allowed. @@ -519,6 +520,7 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; type AnnouncementDepositFactor = AnnouncementDepositFactor; + type BlockNumberProvider = frame_system::Pallet<Runtime>; } impl pallet_utility::Config for Runtime { @@ -779,7 +781,8 @@ impl_runtime_apis!
{ } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result<u128, XcmPaymentApiError> { - match asset.try_as::<AssetId>() { + let latest_asset_id: Result<AssetId, ()> = asset.clone().try_into(); + match latest_asset_id { Ok(asset_id) if asset_id.0 == xcm_config::RelayLocation::get() => { // for native token Ok(WeightToFee::weight_to_fee(&weight)) @@ -1043,7 +1046,9 @@ impl_runtime_apis! { } fn alias_origin() -> Result<(Location, Location), BenchmarkError> { - Err(BenchmarkError::Skip) + let origin = Location::new(1, [Parachain(1000)]); + let target = Location::new(1, [Parachain(1000), AccountId32 { id: [128u8; 32], network: None }]); + Ok((origin, target)) } } diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/mod.rs index 4b51a3ba411b..466da1eadd55 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/mod.rs @@ -21,6 +21,7 @@ use alloc::vec::Vec; use frame_support::weights::Weight; use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight; use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; +use sp_runtime::BoundedVec; use xcm::{ latest::{prelude::*, AssetTransferFilter}, DoubleEncoded, @@ -83,7 +84,11 @@ impl<Call> XcmWeightInfo<Call> for PeopleWestendXcmWeight<Call> { fn transfer_reserve_asset(assets: &Assets, _dest: &Location, _xcm: &Xcm<()>) -> Weight { assets.weigh_assets(XcmFungibleWeight::<Runtime>::transfer_reserve_asset()) } - fn transact(_origin_type: &OriginKind, _call: &DoubleEncoded<Call>) -> Weight { + fn transact( + _origin_type: &OriginKind, + _fallback_max_weight: &Option<Weight>, + _call: &DoubleEncoded<Call>, + ) -> Weight { XcmGeneric::<Runtime>::transact() } fn hrmp_new_channel_open_request( @@ -244,14 +249,21 @@ impl<Call> XcmWeightInfo<Call> for PeopleWestendXcmWeight<Call> { XcmGeneric::<Runtime>::clear_topic() } fn alias_origin(_: &Location) -> Weight { - // XCM Executor does not currently support alias origin operations - Weight::MAX + XcmGeneric::<Runtime>::alias_origin() } fn unpaid_execution(_: &WeightLimit, _: &Option<Location>) -> Weight { XcmGeneric::<Runtime>::unpaid_execution() } - fn set_asset_claimer(_location: &Location) -> Weight { - XcmGeneric::<Runtime>::set_asset_claimer() + fn set_hints(hints: &BoundedVec<Hint, HintNumVariants>) -> Weight { + let mut weight = Weight::zero(); + for hint in hints { + match hint { + AssetClaimer { .. } => { + weight = weight.saturating_add(XcmGeneric::<Runtime>::asset_claimer()); + }, + } + } + weight } fn execute_with_origin(_: &Option<InteriorLocation>, _: &Xcm<Call>) -> Weight { XcmGeneric::<Runtime>::execute_with_origin() diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 36400f2c1e66..3fa51a816b69 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -17,26 +17,28 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-08-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-12-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-svzsllib-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//!
HOSTNAME: `9340d096ec0f`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("people-westend-dev"), DB CACHE: 1024 // Executed Command: // target/production/polkadot-parachain // benchmark // pallet -// --steps=50 -// --repeat=20 // --extrinsic=* +// --chain=people-westend-dev +// --pallet=pallet_xcm_benchmarks::generic +// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/people/people-westend/src/weights/xcm // --wasm-execution=compiled +// --steps=50 +// --repeat=20 // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_xcm_benchmarks::generic -// --chain=people-westend-dev -// --header=./cumulus/file_header.txt -// --template=./cumulus/templates/xcm-bench-template.hbs -// --output=./cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/ +// --template=cumulus/templates/xcm-bench-template.hbs +// --no-storage-info +// --no-min-squares +// --no-median-slopes #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -62,10 +64,10 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn report_holding() -> Weight { // Proof Size summary in bytes: - // Measured: `70` - // Estimated: `3535` - // Minimum execution time: 29_015_000 picoseconds. - Weight::from_parts(30_359_000, 3535) + // Measured: `107` + // Estimated: `3572` + // Minimum execution time: 31_309_000 picoseconds. + Weight::from_parts(31_924_000, 3572) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -73,15 +75,26 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 572_000 picoseconds. - Weight::from_parts(637_000, 0) + // Minimum execution time: 635_000 picoseconds. + Weight::from_parts(677_000, 0) } + // Storage: `System::Account` (r:1 w:1) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) pub fn pay_fees() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3593` + // Minimum execution time: 3_457_000 picoseconds. + Weight::from_parts(3_656_000, 3593) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + pub fn asset_claimer() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_550_000 picoseconds. - Weight::from_parts(1_604_000, 0) + // Minimum execution time: 644_000 picoseconds. + Weight::from_parts(695_000, 0) } // Storage: `PolkadotXcm::Queries` (r:1 w:0) // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -89,58 +102,65 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `3497` - // Minimum execution time: 7_354_000 picoseconds. - Weight::from_parts(7_808_000, 3497) + // Minimum execution time: 7_701_000 picoseconds. + Weight::from_parts(8_120_000, 3497) .saturating_add(T::DbWeight::get().reads(1)) } pub fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_716_000 picoseconds. - Weight::from_parts(7_067_000, 0) + // Minimum execution time: 6_945_000 picoseconds. 
+ Weight::from_parts(7_187_000, 0) } pub fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_280_000 picoseconds. - Weight::from_parts(1_355_000, 0) + // Minimum execution time: 1_352_000 picoseconds. + Weight::from_parts(1_428_000, 0) } pub fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 587_000 picoseconds. - Weight::from_parts(645_000, 0) + // Minimum execution time: 603_000 picoseconds. + Weight::from_parts(648_000, 0) } pub fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 629_000 picoseconds. - Weight::from_parts(662_000, 0) + // Minimum execution time: 621_000 picoseconds. + Weight::from_parts(661_000, 0) } pub fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 590_000 picoseconds. - Weight::from_parts(639_000, 0) + // Minimum execution time: 591_000 picoseconds. + Weight::from_parts(655_000, 0) } pub fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 651_000 picoseconds. - Weight::from_parts(688_000, 0) + // Minimum execution time: 666_000 picoseconds. + Weight::from_parts(736_000, 0) + } + pub fn execute_with_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 694_000 picoseconds. + Weight::from_parts(759_000, 0) } pub fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 601_000 picoseconds. - Weight::from_parts(630_000, 0) + // Minimum execution time: 632_000 picoseconds. + Weight::from_parts(664_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -156,10 +176,10 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn report_error() -> Weight { // Proof Size summary in bytes: - // Measured: `70` - // Estimated: `3535` - // Minimum execution time: 25_650_000 picoseconds. - Weight::from_parts(26_440_000, 3535) + // Measured: `107` + // Estimated: `3572` + // Minimum execution time: 26_932_000 picoseconds. + Weight::from_parts(27_882_000, 3572) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -169,8 +189,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 10_492_000 picoseconds. - Weight::from_parts(10_875_000, 3555) + // Minimum execution time: 11_316_000 picoseconds. + Weight::from_parts(11_608_000, 3555) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -178,8 +198,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 597_000 picoseconds. - Weight::from_parts(647_000, 0) + // Minimum execution time: 564_000 picoseconds. 
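A note on reading these autogenerated entries (the `Weight::from_parts` value for this one follows immediately below): the first argument is `ref_time` in picoseconds on the reference hardware, the second is the `proof_size` in bytes the call may add to the PoV, and storage costs are layered on through `T::DbWeight`. An illustrative composition using figures copied from the `report_holding` entry above, with `RocksDbWeight` standing in for the runtime's `T::DbWeight`:

```rust
use frame_support::weights::{constants::RocksDbWeight, Weight};

// Illustration only: how the generated code assembles a total weight.
fn report_holding_like() -> Weight {
    Weight::from_parts(31_924_000, 3572) // ref_time (ps), proof_size (bytes)
        .saturating_add(RocksDbWeight::get().reads(6)) // T::DbWeight::get().reads(6)
        .saturating_add(RocksDbWeight::get().writes(2)) // T::DbWeight::get().writes(2)
}
```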
+ Weight::from_parts(614_000, 0) } // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -197,8 +217,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 23_732_000 picoseconds. - Weight::from_parts(24_290_000, 3503) + // Minimum execution time: 24_373_000 picoseconds. + Weight::from_parts(25_068_000, 3503) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -208,44 +228,44 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_446_000 picoseconds. - Weight::from_parts(2_613_000, 0) + // Minimum execution time: 2_582_000 picoseconds. + Weight::from_parts(2_714_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 960_000 picoseconds. - Weight::from_parts(1_045_000, 0) + // Minimum execution time: 952_000 picoseconds. + Weight::from_parts(1_059_000, 0) } pub fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 703_000 picoseconds. - Weight::from_parts(739_000, 0) + // Minimum execution time: 684_000 picoseconds. + Weight::from_parts(734_000, 0) } pub fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 616_000 picoseconds. - Weight::from_parts(651_000, 0) + // Minimum execution time: 600_000 picoseconds. + Weight::from_parts(650_000, 0) } pub fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 621_000 picoseconds. - Weight::from_parts(660_000, 0) + // Minimum execution time: 599_000 picoseconds. + Weight::from_parts(628_000, 0) } pub fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 794_000 picoseconds. - Weight::from_parts(831_000, 0) + // Minimum execution time: 769_000 picoseconds. + Weight::from_parts(816_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -261,10 +281,10 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn query_pallet() -> Weight { // Proof Size summary in bytes: - // Measured: `70` - // Estimated: `3535` - // Minimum execution time: 29_527_000 picoseconds. - Weight::from_parts(30_614_000, 3535) + // Measured: `107` + // Estimated: `3572` + // Minimum execution time: 31_815_000 picoseconds. + Weight::from_parts(32_738_000, 3572) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -272,8 +292,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_189_000 picoseconds. - Weight::from_parts(3_296_000, 0) + // Minimum execution time: 3_462_000 picoseconds. 
+ Weight::from_parts(3_563_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -289,10 +309,10 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn report_transact_status() -> Weight { // Proof Size summary in bytes: - // Measured: `70` - // Estimated: `3535` - // Minimum execution time: 25_965_000 picoseconds. - Weight::from_parts(26_468_000, 3535) + // Measured: `107` + // Estimated: `3572` + // Minimum execution time: 27_752_000 picoseconds. + Weight::from_parts(28_455_000, 3572) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -300,49 +320,42 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 618_000 picoseconds. - Weight::from_parts(659_000, 0) + // Minimum execution time: 605_000 picoseconds. + Weight::from_parts(687_000, 0) } pub fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 593_000 picoseconds. - Weight::from_parts(618_000, 0) + // Minimum execution time: 610_000 picoseconds. + Weight::from_parts(646_000, 0) } pub fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 603_000 picoseconds. - Weight::from_parts(634_000, 0) + // Minimum execution time: 579_000 picoseconds. + Weight::from_parts(636_000, 0) } pub fn set_fees_mode() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 568_000 picoseconds. - Weight::from_parts(629_000, 0) + // Minimum execution time: 583_000 picoseconds. + Weight::from_parts(626_000, 0) } pub fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 598_000 picoseconds. - Weight::from_parts(655_000, 0) - } - pub fn set_asset_claimer() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 707_000 picoseconds. - Weight::from_parts(749_000, 0) + // Minimum execution time: 616_000 picoseconds. + Weight::from_parts(679_000, 0) } - pub fn execute_with_origin() -> Weight { + pub fn alias_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 713_000 picoseconds. - Weight::from_parts(776_000, 0) + // Minimum execution time: 626_000 picoseconds. 
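Context for the `alias_origin` entry whose `Weight::from_parts` value follows below: as the people-westend `weights/xcm/mod.rs` hunk earlier shows, this instruction used to be weighted `Weight::MAX` with a comment that the executor did not support origin aliasing, which made any message containing `AliasOrigin` unweighable. A hedged illustration of why (assumed helper, not executor code):

```rust
use frame_support::weights::Weight;

// A message only executes if every instruction's weight fits the limit.
fn fits(instruction: Weight, limit: Weight) -> bool {
    instruction.all_lte(limit)
}
// fits(Weight::MAX, limit) is false for any realistic limit, so the
// benchmarked ~687_000 ps value below is what actually enables AliasOrigin.
```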
+ Weight::from_parts(687_000, 0) } } diff --git a/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs index 25256495ef91..7eaa43c05b20 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs @@ -36,7 +36,8 @@ use polkadot_parachain_primitives::primitives::Sibling; use sp_runtime::traits::AccountIdConversion; use xcm::latest::{prelude::*, WESTEND_GENESIS_HASH}; use xcm_builder::{ - AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, + AccountId32Aliases, AliasChildLocation, AliasOriginRootUsingFilter, + AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, DenyReserveTransferToRelayChain, DenyThenTry, DescribeAllTerminal, DescribeFamily, DescribeTerminus, EnsureXcmOrigin, FrameTransactionalProcessor, FungibleAdapter, @@ -51,6 +52,7 @@ use xcm_executor::XcmExecutor; parameter_types! { pub const RootLocation: Location = Location::here(); pub const RelayLocation: Location = Location::parent(); + pub AssetHubLocation: Location = Location::new(1, [Parachain(1000)]); pub const RelayNetwork: Option<NetworkId> = Some(NetworkId::ByGenesis(WESTEND_GENESIS_HASH)); pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); pub UniversalLocation: InteriorLocation = @@ -195,6 +197,10 @@ pub type WaivedLocations = ( LocalPlurality, ); +/// We allow locations to alias into their own child locations, as well as +/// AssetHub to alias into anything. +pub type Aliasers = (AliasChildLocation, AliasOriginRootUsingFilter<AssetHubLocation, Everything>); + pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { type RuntimeCall = RuntimeCall; @@ -236,7 +242,7 @@ impl xcm_executor::Config for XcmConfig { type UniversalAliases = Nothing; type CallDispatcher = RuntimeCall; type SafeCallFilter = Everything; - type Aliasers = Nothing; + type Aliasers = Aliasers; type TransactionalProcessor = FrameTransactionalProcessor; type HrmpNewChannelOpenRequestHandler = (); type HrmpChannelAcceptedHandler = (); diff --git a/cumulus/parachains/runtimes/people/people-westend/tests/tests.rs b/cumulus/parachains/runtimes/people/people-westend/tests/tests.rs index fa9331952b4b..5cefec44b1cd 100644 --- a/cumulus/parachains/runtimes/people/people-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/people/people-westend/tests/tests.rs @@ -17,7 +17,9 @@ #![cfg(test)] use parachains_common::AccountId; -use people_westend_runtime::xcm_config::LocationToAccountId; +use people_westend_runtime::{ + xcm_config::LocationToAccountId, Block, Runtime, RuntimeCall, RuntimeOrigin, +}; use sp_core::crypto::Ss58Codec; use xcm::latest::prelude::*; use xcm_runtime_apis::conversions::LocationToAccountHelper; @@ -132,3 +134,13 @@ fn location_conversion_works() { assert_eq!(got, expected, "{}", tc.description); } } + +#[test] +fn xcm_payment_api_works() { + parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< + Runtime, + RuntimeCall, + RuntimeOrigin, + Block, + >(); +} diff --git a/cumulus/parachains/runtimes/test-utils/Cargo.toml b/cumulus/parachains/runtimes/test-utils/Cargo.toml index 01d7fcc2b5c8..17c81ae4921a 100644 --- a/cumulus/parachains/runtimes/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/test-utils/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description =
"Utils for Runtimes testing" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -29,6 +31,7 @@ cumulus-pallet-parachain-system = { workspace = true } cumulus-pallet-xcmp-queue = { workspace = true } pallet-collator-selection = { workspace = true } parachain-info = { workspace = true } +parachains-common = { workspace = true } cumulus-primitives-core = { workspace = true } cumulus-primitives-parachain-inherent = { workspace = true } cumulus-test-relay-sproof-builder = { workspace = true } @@ -37,6 +40,7 @@ cumulus-test-relay-sproof-builder = { workspace = true } xcm = { workspace = true } xcm-executor = { workspace = true } pallet-xcm = { workspace = true } +xcm-runtime-apis = { workspace = true } polkadot-parachain-primitives = { workspace = true } [dev-dependencies] @@ -62,11 +66,13 @@ std = [ "pallet-timestamp/std", "pallet-xcm/std", "parachain-info/std", + "parachains-common/std", "polkadot-parachain-primitives/std", "sp-consensus-aura/std", "sp-core/std", "sp-io/std", "sp-runtime/std", "xcm-executor/std", + "xcm-runtime-apis/std", "xcm/std", ] diff --git a/cumulus/parachains/runtimes/test-utils/src/lib.rs b/cumulus/parachains/runtimes/test-utils/src/lib.rs index 05ecf6ca8e81..5c33809ba67b 100644 --- a/cumulus/parachains/runtimes/test-utils/src/lib.rs +++ b/cumulus/parachains/runtimes/test-utils/src/lib.rs @@ -445,7 +445,11 @@ impl< // prepare xcm as governance will do let xcm = Xcm(vec![ UnpaidExecution { weight_limit: Unlimited, check_origin: None }, - Transact { origin_kind: OriginKind::Superuser, call: call.into() }, + Transact { + origin_kind: OriginKind::Superuser, + call: call.into(), + fallback_max_weight: None, + }, ExpectTransactStatus(MaybeErrorCode::Success), ]); @@ -460,18 +464,26 @@ impl< ) } - pub fn execute_as_origin_xcm( - origin: Location, + pub fn execute_as_origin( + (origin, origin_kind): (Location, OriginKind), call: Call, - buy_execution_fee: Asset, + maybe_buy_execution_fee: Option, ) -> Outcome { + let mut instructions = if let Some(buy_execution_fee) = maybe_buy_execution_fee { + vec![ + WithdrawAsset(buy_execution_fee.clone().into()), + BuyExecution { fees: buy_execution_fee.clone(), weight_limit: Unlimited }, + ] + } else { + vec![UnpaidExecution { check_origin: None, weight_limit: Unlimited }] + }; + // prepare `Transact` xcm - let xcm = Xcm(vec![ - WithdrawAsset(buy_execution_fee.clone().into()), - BuyExecution { fees: buy_execution_fee.clone(), weight_limit: Unlimited }, - Transact { origin_kind: OriginKind::Xcm, call: call.encode().into() }, + instructions.extend(vec![ + Transact { origin_kind, call: call.encode().into(), fallback_max_weight: None }, ExpectTransactStatus(MaybeErrorCode::Success), ]); + let xcm = Xcm(instructions); // execute xcm as parent origin let mut hash = xcm.using_encoded(sp_io::hashing::blake2_256); diff --git a/cumulus/parachains/runtimes/test-utils/src/test_cases.rs b/cumulus/parachains/runtimes/test-utils/src/test_cases.rs index a66163154cf6..6bdf3ef09d1b 100644 --- a/cumulus/parachains/runtimes/test-utils/src/test_cases.rs +++ b/cumulus/parachains/runtimes/test-utils/src/test_cases.rs @@ -18,7 +18,15 @@ use crate::{AccountIdOf, CollatorSessionKeys, ExtBuilder, ValidatorIdOf}; use codec::Encode; -use frame_support::{assert_ok, traits::Get}; +use frame_support::{ + assert_ok, + traits::{Get, OriginTrait}, +}; +use parachains_common::AccountId; +use sp_runtime::traits::{Block as BlockT, StaticLookup}; +use xcm_runtime_apis::fees::{ + 
runtime_decl_for_xcm_payment_api::XcmPaymentApiV1, Error as XcmPaymentApiError, +}; type RuntimeHelper<Runtime, AllPalletsWithoutSystem = ()> = crate::RuntimeHelper<Runtime, AllPalletsWithoutSystem>; @@ -128,3 +136,60 @@ pub fn set_storage_keys_by_governance_works( assert_storage(); }); } + +pub fn xcm_payment_api_with_native_token_works<Runtime, RuntimeCall, RuntimeOrigin, Block>() +where + Runtime: XcmPaymentApiV1<Block> + + frame_system::Config<RuntimeOrigin = RuntimeOrigin, AccountId = AccountId> + + pallet_balances::Config<Balance = u128> + + pallet_session::Config + + pallet_xcm::Config + + parachain_info::Config + + pallet_collator_selection::Config + + cumulus_pallet_parachain_system::Config + + cumulus_pallet_xcmp_queue::Config + + pallet_timestamp::Config, + ValidatorIdOf<Runtime>: From<AccountIdOf<Runtime>>, + RuntimeOrigin: OriginTrait<AccountId = <Runtime as frame_system::Config>::AccountId>, + <<Runtime as frame_system::Config>::Lookup as StaticLookup>::Source: + From<<Runtime as frame_system::Config>::AccountId>, + Block: BlockT, +{ + use xcm::prelude::*; + ExtBuilder::<Runtime>::default().build().execute_with(|| { + let transfer_amount = 100u128; + let xcm_to_weigh = Xcm::<RuntimeCall>::builder_unsafe() + .withdraw_asset((Here, transfer_amount)) + .buy_execution((Here, transfer_amount), Unlimited) + .deposit_asset(AllCounted(1), [1u8; 32]) + .build(); + let versioned_xcm_to_weigh = VersionedXcm::from(xcm_to_weigh.clone().into()); + + // We first try calling it with a lower XCM version. + let lower_version_xcm_to_weigh = + versioned_xcm_to_weigh.clone().into_version(XCM_VERSION - 1).unwrap(); + let xcm_weight = Runtime::query_xcm_weight(lower_version_xcm_to_weigh); + assert!(xcm_weight.is_ok()); + let native_token: Location = Parent.into(); + let native_token_versioned = VersionedAssetId::from(AssetId(native_token)); + let lower_version_native_token = + native_token_versioned.clone().into_version(XCM_VERSION - 1).unwrap(); + let execution_fees = + Runtime::query_weight_to_asset_fee(xcm_weight.unwrap(), lower_version_native_token); + assert!(execution_fees.is_ok()); + + // Now we call it with the latest version. + let xcm_weight = Runtime::query_xcm_weight(versioned_xcm_to_weigh); + assert!(xcm_weight.is_ok()); + let execution_fees = + Runtime::query_weight_to_asset_fee(xcm_weight.unwrap(), native_token_versioned); + assert!(execution_fees.is_ok()); + + // If we call it with anything other than the native token it will error.
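(The negative case announced by the comment above continues right below.) A short note on how the weighed program at the top of this test is built: `builder_unsafe()` skips the compile-time checks that `Xcm::builder()` enforces (the checked builder statically requires the program to load the holding register and pay fees before anything else), which keeps the test program minimal. The same construction, reduced to a standalone sketch with a unit `Call` type:

```rust
// Mirrors the test's program; `()` replaces the runtime's call type.
use xcm::latest::prelude::*;

fn program_to_weigh(amount: u128) -> Xcm<()> {
    Xcm::<()>::builder_unsafe()
        .withdraw_asset((Here, amount))
        .buy_execution((Here, amount), Unlimited)
        .deposit_asset(AllCounted(1), [1u8; 32])
        .build()
}
```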
+ let non_existent_token: Location = Here.into(); + let non_existent_token_versioned = VersionedAssetId::from(AssetId(non_existent_token)); + let execution_fees = + Runtime::query_weight_to_asset_fee(xcm_weight.unwrap(), non_existent_token_versioned); + assert_eq!(execution_fees, Err(XcmPaymentApiError::AssetNotFound)); + }); +} diff --git a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml index 3a6b9d42f211..3bd1e5c6f436 100644 --- a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml @@ -175,6 +175,7 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml index b0581c8d43ff..035d0ac94be6 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Simple runtime used by the rococo parachain(s)" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -134,6 +136,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm/runtime-benchmarks", ] # A feature that should be enabled when the runtime should be built for on-chain diff --git a/cumulus/polkadot-omni-node/Cargo.toml b/cumulus/polkadot-omni-node/Cargo.toml index a736e1ef80c5..8b46bc882868 100644 --- a/cumulus/polkadot-omni-node/Cargo.toml +++ b/cumulus/polkadot-omni-node/Cargo.toml @@ -6,6 +6,8 @@ edition.workspace = true build = "build.rs" description = "Generic binary that can run a parachain node with u32 block number and Aura consensus" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/polkadot-omni-node/README.md b/cumulus/polkadot-omni-node/README.md index d87b3b63c407..015019961c9f 100644 --- a/cumulus/polkadot-omni-node/README.md +++ b/cumulus/polkadot-omni-node/README.md @@ -49,10 +49,10 @@ chain-spec-builder create --relay-chain --para-id -r +polkadot-omni-node --dev --chain ``` ## Useful links diff --git a/cumulus/polkadot-omni-node/lib/Cargo.toml b/cumulus/polkadot-omni-node/lib/Cargo.toml index a690229f1695..43478b41a119 100644 --- a/cumulus/polkadot-omni-node/lib/Cargo.toml +++ b/cumulus/polkadot-omni-node/lib/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Helper library that can be used to build a parachain node" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -26,10 +28,13 @@ docify = { workspace = true } # Local jsonrpsee = { features = ["server"], workspace = true } parachains-common = { workspace = true, default-features = true } +scale-info = { workspace = true } +subxt-metadata = { workspace = true, default-features = true } # Substrate frame-benchmarking = { optional = true, workspace = true, default-features = true } frame-benchmarking-cli = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true } sp-runtime = { workspace = true } sp-core = { workspace = true, default-features = true } sp-session = { workspace = true, 
default-features = true } @@ -55,12 +60,16 @@ sc-rpc = { workspace = true, default-features = true } sp-version = { workspace = true, default-features = true } sp-weights = { workspace = true, default-features = true } sc-tracing = { workspace = true, default-features = true } +sc-runtime-utilities = { workspace = true, default-features = true } +sp-storage = { workspace = true, default-features = true } frame-system-rpc-runtime-api = { workspace = true, default-features = true } pallet-transaction-payment = { workspace = true, default-features = true } pallet-transaction-payment-rpc-runtime-api = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } sp-consensus-aura = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } sc-consensus-manual-seal = { workspace = true, default-features = true } sc-sysinfo = { workspace = true, default-features = true } prometheus-endpoint = { workspace = true, default-features = true } @@ -91,15 +100,12 @@ assert_cmd = { workspace = true } nix = { features = ["signal"], workspace = true } tokio = { version = "1.32.0", features = ["macros", "parking_lot", "time"] } wait-timeout = { workspace = true } +cumulus-test-runtime = { workspace = true } [features] default = [] -rococo-native = [ - "polkadot-cli/rococo-native", -] -westend-native = [ - "polkadot-cli/westend-native", -] +rococo-native = ["polkadot-cli/rococo-native"] +westend-native = ["polkadot-cli/westend-native"] runtime-benchmarks = [ "cumulus-primitives-core/runtime-benchmarks", "frame-benchmarking-cli/runtime-benchmarks", diff --git a/cumulus/polkadot-omni-node/lib/src/cli.rs b/cumulus/polkadot-omni-node/lib/src/cli.rs index dc59c185d909..9c4e2561592d 100644 --- a/cumulus/polkadot-omni-node/lib/src/cli.rs +++ b/cumulus/polkadot-omni-node/lib/src/cli.rs @@ -126,9 +126,14 @@ pub struct Cli { /// Start a dev node that produces a block each `dev_block_time` ms. /// - /// This is a dev option, and it won't result in starting or connecting to a parachain network. - /// The resulting node will work on its own, running the wasm blob and artificially producing - /// a block each `dev_block_time` ms, as if it was part of a parachain. + /// This is a dev option. It enables manual sealing, meaning blocks are produced manually + /// rather than being part of an actual network consensus process. Using the option won't + /// result in starting or connecting to a parachain network. The resulting node will work on + /// its own, running the wasm blob and artificially producing a block each `dev_block_time` ms, + /// as if it was part of a parachain. + /// + /// The `--dev` flag sets the `dev_block_time` to a default value of 3000ms unless explicitly + /// provided.
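The interaction just described (the `dev_block_time` field it documents follows below) reduces to a small decision table; a sketch mirroring the resolution logic added to `command.rs` further down in this diff, where `DEFAULT_DEV_BLOCK_TIME_MS` is introduced:

```rust
const DEFAULT_DEV_BLOCK_TIME_MS: u64 = 3000;

// Sketch of the flag resolution (the real logic lives in command.rs below).
fn manual_seal_block_time(is_dev: bool, dev_block_time: Option<u64>) -> Option<u64> {
    match (is_dev, dev_block_time) {
        (_, Some(ms)) => Some(ms), // explicit --dev-block-time wins
        (true, None) => Some(DEFAULT_DEV_BLOCK_TIME_MS), // --dev defaults to 3000ms
        (false, None) => None, // normal consensus path, no manual seal
    }
}
```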
#[arg(long)] pub dev_block_time: Option, diff --git a/cumulus/polkadot-omni-node/lib/src/command.rs b/cumulus/polkadot-omni-node/lib/src/command.rs index cf283819966f..fe7f7cac0971 100644 --- a/cumulus/polkadot-omni-node/lib/src/command.rs +++ b/cumulus/polkadot-omni-node/lib/src/command.rs @@ -34,11 +34,13 @@ use cumulus_client_service::storage_proof_size::HostFunctions as ReclaimHostFunc use cumulus_primitives_core::ParaId; use frame_benchmarking_cli::{BenchmarkCmd, SUBSTRATE_REFERENCE_HARDWARE}; use log::info; -use sc_cli::{Result, SubstrateCli}; +use sc_cli::{CliConfiguration, Result, SubstrateCli}; use sp_runtime::traits::AccountIdConversion; #[cfg(feature = "runtime-benchmarks")] use sp_runtime::traits::HashingFor; +const DEFAULT_DEV_BLOCK_TIME_MS: u64 = 3000; + /// Structure that can be used in order to provide customizers for different functionalities of the /// node binary that is being built using this library. pub struct RunConfig { @@ -230,10 +232,19 @@ pub fn run(cmd_config: RunConfig) -> Result<() .ok_or("Could not find parachain extension in chain-spec.")?, ); + if cli.run.base.is_dev()? { + // Set default dev block time to 3000ms if not set. + // TODO: take block time from AURA config if set. + let dev_block_time = cli.dev_block_time.unwrap_or(DEFAULT_DEV_BLOCK_TIME_MS); + return node_spec + .start_manual_seal_node(config, para_id, dev_block_time) + .map_err(Into::into); + } + if let Some(dev_block_time) = cli.dev_block_time { return node_spec .start_manual_seal_node(config, para_id, dev_block_time) - .map_err(Into::into) + .map_err(Into::into); } // If Statemint (Statemine, Westmint, Rockmine) DB exists and we're using the diff --git a/cumulus/polkadot-omni-node/lib/src/common/runtime.rs b/cumulus/polkadot-omni-node/lib/src/common/runtime.rs index 509d13b9d7a2..2a95f41495a6 100644 --- a/cumulus/polkadot-omni-node/lib/src/common/runtime.rs +++ b/cumulus/polkadot-omni-node/lib/src/common/runtime.rs @@ -16,7 +16,19 @@ //! Runtime parameters. +use codec::Decode; +use cumulus_client_service::ParachainHostFunctions; use sc_chain_spec::ChainSpec; +use sc_executor::WasmExecutor; +use sc_runtime_utilities::fetch_latest_metadata_from_code_blob; +use scale_info::{form::PortableForm, TypeDef, TypeDefPrimitive}; +use std::fmt::Display; +use subxt_metadata::{Metadata, StorageEntryType}; + +/// Expected parachain system pallet runtime type name. +pub const DEFAULT_PARACHAIN_SYSTEM_PALLET_NAME: &str = "ParachainSystem"; +/// Expected frame system pallet runtime type name. +pub const DEFAULT_FRAME_SYSTEM_PALLET_NAME: &str = "System"; /// The Aura ID used by the Aura consensus #[derive(PartialEq)] @@ -35,7 +47,7 @@ pub enum Consensus { } /// The choice of block number for the parachain omni-node. 
-#[derive(PartialEq)] +#[derive(PartialEq, Debug)] pub enum BlockNumber { /// u32 U32, @@ -43,6 +55,34 @@ pub enum BlockNumber { /// u64 U64, } +impl Display for BlockNumber { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + BlockNumber::U32 => write!(f, "u32"), + BlockNumber::U64 => write!(f, "u64"), + } + } +} + +impl Into<TypeDefPrimitive> for BlockNumber { + fn into(self) -> TypeDefPrimitive { + match self { + BlockNumber::U32 => TypeDefPrimitive::U32, + BlockNumber::U64 => TypeDefPrimitive::U64, + } + } +} + +impl BlockNumber { + fn from_type_def(type_def: &TypeDef<PortableForm>) -> Option<BlockNumber> { + match type_def { + TypeDef::Primitive(TypeDefPrimitive::U32) => Some(BlockNumber::U32), + TypeDef::Primitive(TypeDefPrimitive::U64) => Some(BlockNumber::U64), + _ => None, + } + } +} + /// Helper enum listing the supported Runtime types #[derive(PartialEq)] pub enum Runtime { @@ -62,7 +102,108 @@ pub trait RuntimeResolver { pub struct DefaultRuntimeResolver; impl RuntimeResolver for DefaultRuntimeResolver { - fn runtime(&self, _chain_spec: &dyn ChainSpec) -> sc_cli::Result<Runtime> { - Ok(Runtime::Omni(BlockNumber::U32, Consensus::Aura(AuraConsensusId::Sr25519))) + fn runtime(&self, chain_spec: &dyn ChainSpec) -> sc_cli::Result<Runtime> { + let metadata_inspector = MetadataInspector::new(chain_spec)?; + let block_number = match metadata_inspector.block_number() { + Some(inner) => inner, + None => { + log::warn!( + r#"⚠️ There isn't a runtime type named `System`, corresponding to the `frame-system` + pallet (https://docs.rs/frame-system/latest/frame_system/). Please check Omni Node docs for runtime conventions: + https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/reference_docs/omni_node/index.html#runtime-conventions. + Note: We'll assume a block number size of `u32`."# + ); + BlockNumber::U32 + }, + }; + + if !metadata_inspector.pallet_exists(DEFAULT_PARACHAIN_SYSTEM_PALLET_NAME) { + log::warn!( + r#"⚠️ The parachain system pallet (https://docs.rs/crate/cumulus-pallet-parachain-system/latest) is + missing from the runtime’s metadata.
Please check Omni Node docs for runtime conventions: + https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/reference_docs/omni_node/index.html#runtime-conventions."# + ); + } + + Ok(Runtime::Omni(block_number, Consensus::Aura(AuraConsensusId::Sr25519))) + } +} + +struct MetadataInspector(Metadata); + +impl MetadataInspector { + fn new(chain_spec: &dyn ChainSpec) -> Result { + MetadataInspector::fetch_metadata(chain_spec).map(MetadataInspector) + } + + fn pallet_exists(&self, name: &str) -> bool { + self.0.pallet_by_name(name).is_some() + } + + fn block_number(&self) -> Option { + let pallet_metadata = self.0.pallet_by_name(DEFAULT_FRAME_SYSTEM_PALLET_NAME); + pallet_metadata + .and_then(|inner| inner.storage()) + .and_then(|inner| inner.entry_by_name("Number")) + .and_then(|number_ty| match number_ty.entry_type() { + StorageEntryType::Plain(ty_id) => Some(ty_id), + _ => None, + }) + .and_then(|ty_id| self.0.types().resolve(*ty_id)) + .and_then(|portable_type| BlockNumber::from_type_def(&portable_type.type_def)) + } + + fn fetch_metadata(chain_spec: &dyn ChainSpec) -> Result { + let mut storage = chain_spec.build_storage()?; + let code_bytes = storage + .top + .remove(sp_storage::well_known_keys::CODE) + .ok_or("chain spec genesis does not contain code")?; + let opaque_metadata = fetch_latest_metadata_from_code_blob( + &WasmExecutor::::builder() + .with_allow_missing_host_functions(true) + .build(), + sp_runtime::Cow::Borrowed(code_bytes.as_slice()), + ) + .map_err(|err| err.to_string())?; + + Metadata::decode(&mut (*opaque_metadata).as_slice()).map_err(Into::into) + } +} + +#[cfg(test)] +mod tests { + use crate::runtime::{ + BlockNumber, MetadataInspector, DEFAULT_FRAME_SYSTEM_PALLET_NAME, + DEFAULT_PARACHAIN_SYSTEM_PALLET_NAME, + }; + use codec::Decode; + use cumulus_client_service::ParachainHostFunctions; + use sc_executor::WasmExecutor; + use sc_runtime_utilities::fetch_latest_metadata_from_code_blob; + + fn cumulus_test_runtime_metadata() -> subxt_metadata::Metadata { + let opaque_metadata = fetch_latest_metadata_from_code_blob( + &WasmExecutor::::builder() + .with_allow_missing_host_functions(true) + .build(), + sp_runtime::Cow::Borrowed(cumulus_test_runtime::WASM_BINARY.unwrap()), + ) + .unwrap(); + + subxt_metadata::Metadata::decode(&mut (*opaque_metadata).as_slice()).unwrap() + } + + #[test] + fn test_pallet_exists() { + let metadata_inspector = MetadataInspector(cumulus_test_runtime_metadata()); + assert!(metadata_inspector.pallet_exists(DEFAULT_PARACHAIN_SYSTEM_PALLET_NAME)); + assert!(metadata_inspector.pallet_exists(DEFAULT_FRAME_SYSTEM_PALLET_NAME)); + } + + #[test] + fn test_runtime_block_number() { + let metadata_inspector = MetadataInspector(cumulus_test_runtime_metadata()); + assert_eq!(metadata_inspector.block_number().unwrap(), BlockNumber::U32); } } diff --git a/cumulus/polkadot-omni-node/lib/src/common/spec.rs b/cumulus/polkadot-omni-node/lib/src/common/spec.rs index 259f89049c92..868368f3ca1a 100644 --- a/cumulus/polkadot-omni-node/lib/src/common/spec.rs +++ b/cumulus/polkadot-omni-node/lib/src/common/spec.rs @@ -44,23 +44,28 @@ use sc_transaction_pool::TransactionPoolHandle; use sp_keystore::KeystorePtr; use std::{future::Future, pin::Pin, sync::Arc, time::Duration}; -pub(crate) trait BuildImportQueue { +pub(crate) trait BuildImportQueue< + Block: BlockT, + RuntimeApi, + BlockImport: sc_consensus::BlockImport, +> +{ fn build_import_queue( client: Arc>, - block_import: ParachainBlockImport, + block_import: ParachainBlockImport, config: &Configuration, 
telemetry_handle: Option, task_manager: &TaskManager, ) -> sc_service::error::Result>; } -pub(crate) trait StartConsensus +pub(crate) trait StartConsensus where RuntimeApi: ConstructNodeRuntimeApi>, { fn start_consensus( client: Arc>, - block_import: ParachainBlockImport, + block_import: ParachainBlockImport, prometheus_registry: Option<&Registry>, telemetry: Option, task_manager: &TaskManager, @@ -74,6 +79,7 @@ where announce_block: Arc>) + Send + Sync>, backend: Arc>, node_extra_args: NodeExtraArgs, + block_import_extra_return_value: BIAuxiliaryData, ) -> Result<(), sc_service::Error>; } @@ -92,6 +98,31 @@ fn warn_if_slow_hardware(hwbench: &sc_sysinfo::HwBench) { } } +pub(crate) trait InitBlockImport { + type BlockImport: sc_consensus::BlockImport + Clone + Send + Sync; + type BlockImportAuxiliaryData; + + fn init_block_import( + client: Arc>, + ) -> sc_service::error::Result<(Self::BlockImport, Self::BlockImportAuxiliaryData)>; +} + +pub(crate) struct ClientBlockImport; + +impl InitBlockImport for ClientBlockImport +where + RuntimeApi: Send + ConstructNodeRuntimeApi>, +{ + type BlockImport = Arc>; + type BlockImportAuxiliaryData = (); + + fn init_block_import( + client: Arc>, + ) -> sc_service::error::Result<(Self::BlockImport, Self::BlockImportAuxiliaryData)> { + Ok((client.clone(), ())) + } +} + pub(crate) trait BaseNodeSpec { type Block: NodeBlock; @@ -100,7 +131,13 @@ pub(crate) trait BaseNodeSpec { ParachainClient, >; - type BuildImportQueue: BuildImportQueue; + type BuildImportQueue: BuildImportQueue< + Self::Block, + Self::RuntimeApi, + >::BlockImport, + >; + + type InitBlockImport: self::InitBlockImport; /// Starts a `ServiceBuilder` for a full service. /// @@ -108,7 +145,14 @@ pub(crate) trait BaseNodeSpec { /// be able to perform chain operations. 
fn new_partial( config: &Configuration, - ) -> sc_service::error::Result> { + ) -> sc_service::error::Result< + ParachainService< + Self::Block, + Self::RuntimeApi, + >::BlockImport, + >::BlockImportAuxiliaryData + > + >{ let telemetry = config .telemetry_endpoints .clone() @@ -160,7 +204,10 @@ pub(crate) trait BaseNodeSpec { .build(), ); - let block_import = ParachainBlockImport::new(client.clone(), backend.clone()); + let (block_import, block_import_auxiliary_data) = + Self::InitBlockImport::init_block_import(client.clone())?; + + let block_import = ParachainBlockImport::new(block_import, backend.clone()); let import_queue = Self::BuildImportQueue::build_import_queue( client.clone(), @@ -178,7 +225,7 @@ pub(crate) trait BaseNodeSpec { task_manager, transaction_pool, select_chain: (), - other: (block_import, telemetry, telemetry_worker_handle), + other: (block_import, telemetry, telemetry_worker_handle, block_import_auxiliary_data), }) } } @@ -190,7 +237,12 @@ pub(crate) trait NodeSpec: BaseNodeSpec { TransactionPoolHandle>, >; - type StartConsensus: StartConsensus; + type StartConsensus: StartConsensus< + Self::Block, + Self::RuntimeApi, + >::BlockImport, + >::BlockImportAuxiliaryData, + >; const SYBIL_RESISTANCE: CollatorSybilResistance; @@ -208,151 +260,153 @@ pub(crate) trait NodeSpec: BaseNodeSpec { where Net: NetworkBackend, { - Box::pin( - async move { - let parachain_config = prepare_node_config(parachain_config); - - let params = Self::new_partial(¶chain_config)?; - let (block_import, mut telemetry, telemetry_worker_handle) = params.other; - - let client = params.client.clone(); - let backend = params.backend.clone(); - - let mut task_manager = params.task_manager; - let (relay_chain_interface, collator_key) = build_relay_chain_interface( - polkadot_config, - ¶chain_config, - telemetry_worker_handle, - &mut task_manager, - collator_options.clone(), - hwbench.clone(), - ) - .await - .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?; - - let validator = parachain_config.role.is_authority(); - let prometheus_registry = parachain_config.prometheus_registry().cloned(); - let transaction_pool = params.transaction_pool.clone(); - let import_queue_service = params.import_queue.service(); - let net_config = FullNetworkConfiguration::<_, _, Net>::new( - ¶chain_config.network, - prometheus_registry.clone(), - ); - - let (network, system_rpc_tx, tx_handler_controller, sync_service) = - build_network(BuildNetworkParams { - parachain_config: ¶chain_config, - net_config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - para_id, - spawn_handle: task_manager.spawn_handle(), - relay_chain_interface: relay_chain_interface.clone(), - import_queue: params.import_queue, - sybil_resistance_level: Self::SYBIL_RESISTANCE, - }) - .await?; - - let rpc_builder = { - let client = client.clone(); - let transaction_pool = transaction_pool.clone(); - let backend_for_rpc = backend.clone(); - - Box::new(move |_| { - Self::BuildRpcExtensions::build_rpc_extensions( - client.clone(), - backend_for_rpc.clone(), - transaction_pool.clone(), - ) - }) - }; - - sc_service::spawn_tasks(sc_service::SpawnTasksParams { - rpc_builder, + let fut = async move { + let parachain_config = prepare_node_config(parachain_config); + + let params = Self::new_partial(¶chain_config)?; + let (block_import, mut telemetry, telemetry_worker_handle, block_import_auxiliary_data) = + params.other; + let client = params.client.clone(); + let backend = params.backend.clone(); + let mut task_manager = 
params.task_manager; + let (relay_chain_interface, collator_key) = build_relay_chain_interface( + polkadot_config, + ¶chain_config, + telemetry_worker_handle, + &mut task_manager, + collator_options.clone(), + hwbench.clone(), + ) + .await + .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?; + + let validator = parachain_config.role.is_authority(); + let prometheus_registry = parachain_config.prometheus_registry().cloned(); + let transaction_pool = params.transaction_pool.clone(); + let import_queue_service = params.import_queue.service(); + let net_config = FullNetworkConfiguration::<_, _, Net>::new( + ¶chain_config.network, + prometheus_registry.clone(), + ); + + let (network, system_rpc_tx, tx_handler_controller, sync_service) = + build_network(BuildNetworkParams { + parachain_config: ¶chain_config, + net_config, client: client.clone(), transaction_pool: transaction_pool.clone(), - task_manager: &mut task_manager, - config: parachain_config, - keystore: params.keystore_container.keystore(), - backend: backend.clone(), - network: network.clone(), - sync_service: sync_service.clone(), - system_rpc_tx, - tx_handler_controller, - telemetry: telemetry.as_mut(), - })?; - - if let Some(hwbench) = hwbench { - sc_sysinfo::print_hwbench(&hwbench); - if validator { - warn_if_slow_hardware(&hwbench); - } - - if let Some(ref mut telemetry) = telemetry { - let telemetry_handle = telemetry.handle(); - task_manager.spawn_handle().spawn( - "telemetry_hwbench", - None, - sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench), - ); - } - } - - let announce_block = { - let sync_service = sync_service.clone(); - Arc::new(move |hash, data| sync_service.announce_block(hash, data)) - }; - - let relay_chain_slot_duration = Duration::from_secs(6); - - let overseer_handle = relay_chain_interface - .overseer_handle() - .map_err(|e| sc_service::Error::Application(Box::new(e)))?; - - start_relay_chain_tasks(StartRelayChainTasksParams { - client: client.clone(), - announce_block: announce_block.clone(), para_id, + spawn_handle: task_manager.spawn_handle(), relay_chain_interface: relay_chain_interface.clone(), - task_manager: &mut task_manager, - da_recovery_profile: if validator { - DARecoveryProfile::Collator - } else { - DARecoveryProfile::FullNode - }, - import_queue: import_queue_service, - relay_chain_slot_duration, - recovery_handle: Box::new(overseer_handle.clone()), - sync_service, - })?; - - if validator { - Self::StartConsensus::start_consensus( + import_queue: params.import_queue, + sybil_resistance_level: Self::SYBIL_RESISTANCE, + }) + .await?; + + let rpc_builder = { + let client = client.clone(); + let transaction_pool = transaction_pool.clone(); + let backend_for_rpc = backend.clone(); + + Box::new(move |_| { + Self::BuildRpcExtensions::build_rpc_extensions( client.clone(), - block_import, - prometheus_registry.as_ref(), - telemetry.as_ref().map(|t| t.handle()), - &task_manager, - relay_chain_interface.clone(), - transaction_pool, - params.keystore_container.keystore(), - relay_chain_slot_duration, - para_id, - collator_key.expect("Command line arguments do not allow this. 
qed"), - overseer_handle, - announce_block, - backend.clone(), - node_extra_args, - )?; + backend_for_rpc.clone(), + transaction_pool.clone(), + ) + }) + }; + + sc_service::spawn_tasks(sc_service::SpawnTasksParams { + rpc_builder, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + task_manager: &mut task_manager, + config: parachain_config, + keystore: params.keystore_container.keystore(), + backend: backend.clone(), + network: network.clone(), + sync_service: sync_service.clone(), + system_rpc_tx, + tx_handler_controller, + telemetry: telemetry.as_mut(), + })?; + + if let Some(hwbench) = hwbench { + sc_sysinfo::print_hwbench(&hwbench); + if validator { + warn_if_slow_hardware(&hwbench); } - Ok(task_manager) + if let Some(ref mut telemetry) = telemetry { + let telemetry_handle = telemetry.handle(); + task_manager.spawn_handle().spawn( + "telemetry_hwbench", + None, + sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench), + ); + } + } + + let announce_block = { + let sync_service = sync_service.clone(); + Arc::new(move |hash, data| sync_service.announce_block(hash, data)) + }; + + let relay_chain_slot_duration = Duration::from_secs(6); + + let overseer_handle = relay_chain_interface + .overseer_handle() + .map_err(|e| sc_service::Error::Application(Box::new(e)))?; + + start_relay_chain_tasks(StartRelayChainTasksParams { + client: client.clone(), + announce_block: announce_block.clone(), + para_id, + relay_chain_interface: relay_chain_interface.clone(), + task_manager: &mut task_manager, + da_recovery_profile: if validator { + DARecoveryProfile::Collator + } else { + DARecoveryProfile::FullNode + }, + import_queue: import_queue_service, + relay_chain_slot_duration, + recovery_handle: Box::new(overseer_handle.clone()), + sync_service, + })?; + + if validator { + Self::StartConsensus::start_consensus( + client.clone(), + block_import, + prometheus_registry.as_ref(), + telemetry.as_ref().map(|t| t.handle()), + &task_manager, + relay_chain_interface.clone(), + transaction_pool, + params.keystore_container.keystore(), + relay_chain_slot_duration, + para_id, + collator_key.expect("Command line arguments do not allow this. 
qed"), + overseer_handle, + announce_block, + backend.clone(), + node_extra_args, + block_import_auxiliary_data, + )?; } - .instrument(sc_tracing::tracing::info_span!( + + Ok(task_manager) + }; + + Box::pin(Instrument::instrument( + fut, + sc_tracing::tracing::info_span!( sc_tracing::logging::PREFIX_LOG_SPAN, - name = "Parachain", - )), - ) + name = "Parachain" + ), + )) } } diff --git a/cumulus/polkadot-omni-node/lib/src/common/types.rs b/cumulus/polkadot-omni-node/lib/src/common/types.rs index 4bc58dc9db7e..978368be2584 100644 --- a/cumulus/polkadot-omni-node/lib/src/common/types.rs +++ b/cumulus/polkadot-omni-node/lib/src/common/types.rs @@ -22,7 +22,6 @@ use sc_service::{PartialComponents, TFullBackend, TFullClient}; use sc_telemetry::{Telemetry, TelemetryWorkerHandle}; use sc_transaction_pool::TransactionPoolHandle; use sp_runtime::{generic, traits::BlakeTwo256}; -use std::sync::Arc; pub use parachains_common::{AccountId, Balance, Hash, Nonce}; @@ -42,15 +41,20 @@ pub type ParachainClient = pub type ParachainBackend = TFullBackend; -pub type ParachainBlockImport = - TParachainBlockImport>, ParachainBackend>; +pub type ParachainBlockImport = + TParachainBlockImport>; /// Assembly of PartialComponents (enough to run chain ops subcommands) -pub type ParachainService = PartialComponents< +pub type ParachainService = PartialComponents< ParachainClient, ParachainBackend, (), DefaultImportQueue, TransactionPoolHandle>, - (ParachainBlockImport, Option, Option), + ( + ParachainBlockImport, + Option, + Option, + BIExtraReturnValue, + ), >; diff --git a/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs b/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs index ec5d0a439ec4..816f76117a26 100644 --- a/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs +++ b/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs @@ -18,7 +18,10 @@ use crate::{ common::{ aura::{AuraIdT, AuraRuntimeApi}, rpc::BuildParachainRpcExtensions, - spec::{BaseNodeSpec, BuildImportQueue, NodeSpec, StartConsensus}, + spec::{ + BaseNodeSpec, BuildImportQueue, ClientBlockImport, InitBlockImport, NodeSpec, + StartConsensus, + }, types::{ AccountId, Balance, Hash, Nonce, ParachainBackend, ParachainBlockImport, ParachainClient, @@ -30,11 +33,14 @@ use crate::{ use cumulus_client_collator::service::{ CollatorService, ServiceInterface as CollatorServiceInterface, }; -use cumulus_client_consensus_aura::collators::lookahead::{self as aura, Params as AuraParams}; #[docify::export(slot_based_colator_import)] use cumulus_client_consensus_aura::collators::slot_based::{ self as slot_based, Params as SlotBasedParams, }; +use cumulus_client_consensus_aura::collators::{ + lookahead::{self as aura, Params as AuraParams}, + slot_based::{SlotBasedBlockImport, SlotBasedBlockImportHandle}, +}; use cumulus_client_consensus_proposer::{Proposer, ProposerInterface}; use cumulus_client_consensus_relay_chain::Verifier as RelayChainVerifier; #[allow(deprecated)] @@ -54,6 +60,7 @@ use sc_service::{Configuration, Error, TaskManager}; use sc_telemetry::TelemetryHandle; use sc_transaction_pool::TransactionPoolHandle; use sp_api::ProvideRuntimeApi; +use sp_core::traits::SpawnNamed; use sp_inherents::CreateInherentDataProviders; use sp_keystore::KeystorePtr; use sp_runtime::{ @@ -90,20 +97,23 @@ where /// Build the import queue for parachain runtimes that started with relay chain consensus and /// switched to aura. 
-pub(crate) struct BuildRelayToAuraImportQueue( - PhantomData<(Block, RuntimeApi, AuraId)>, +pub(crate) struct BuildRelayToAuraImportQueue( + PhantomData<(Block, RuntimeApi, AuraId, BlockImport)>, ); -impl BuildImportQueue - for BuildRelayToAuraImportQueue +impl + BuildImportQueue + for BuildRelayToAuraImportQueue where RuntimeApi: ConstructNodeRuntimeApi>, RuntimeApi::RuntimeApi: AuraRuntimeApi, AuraId: AuraIdT + Sync, + BlockImport: + sc_consensus::BlockImport + Send + Sync + 'static, { fn build_import_queue( client: Arc>, - block_import: ParachainBlockImport, + block_import: ParachainBlockImport, config: &Configuration, telemetry_handle: Option, task_manager: &TaskManager, @@ -158,20 +168,20 @@ where /// Uses the lookahead collator to support async backing. /// /// Start an aura powered parachain node. Some system chains use this. -pub(crate) struct AuraNode( - pub PhantomData<(Block, RuntimeApi, AuraId, StartConsensus)>, +pub(crate) struct AuraNode( + pub PhantomData<(Block, RuntimeApi, AuraId, StartConsensus, InitBlockImport)>, ); -impl Default - for AuraNode +impl Default + for AuraNode { fn default() -> Self { Self(Default::default()) } } -impl BaseNodeSpec - for AuraNode +impl BaseNodeSpec + for AuraNode where Block: NodeBlock, RuntimeApi: ConstructNodeRuntimeApi>, @@ -179,14 +189,19 @@ where + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi + substrate_frame_rpc_system::AccountNonceApi, AuraId: AuraIdT + Sync, + InitBlockImport: self::InitBlockImport + Send, + InitBlockImport::BlockImport: + sc_consensus::BlockImport + 'static, { type Block = Block; type RuntimeApi = RuntimeApi; - type BuildImportQueue = BuildRelayToAuraImportQueue; + type BuildImportQueue = + BuildRelayToAuraImportQueue; + type InitBlockImport = InitBlockImport; } -impl NodeSpec - for AuraNode +impl NodeSpec + for AuraNode where Block: NodeBlock, RuntimeApi: ConstructNodeRuntimeApi>, @@ -194,7 +209,15 @@ where + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi + substrate_frame_rpc_system::AccountNonceApi, AuraId: AuraIdT + Sync, - StartConsensus: self::StartConsensus + 'static, + StartConsensus: self::StartConsensus< + Block, + RuntimeApi, + InitBlockImport::BlockImport, + InitBlockImport::BlockImportAuxiliaryData, + > + 'static, + InitBlockImport: self::InitBlockImport + Send, + InitBlockImport::BlockImport: + sc_consensus::BlockImport + 'static, { type BuildRpcExtensions = BuildParachainRpcExtensions; type StartConsensus = StartConsensus; @@ -218,6 +241,7 @@ where RuntimeApi, AuraId, StartSlotBasedAuraConsensus, + StartSlotBasedAuraConsensus, >::default()) } else { Box::new(AuraNode::< @@ -225,6 +249,7 @@ where RuntimeApi, AuraId, StartLookaheadAuraConsensus, + ClientBlockImport, >::default()) } } @@ -242,9 +267,17 @@ where AuraId: AuraIdT + Sync, { #[docify::export_content] - fn launch_slot_based_collator( + fn launch_slot_based_collator( params: SlotBasedParams< - ParachainBlockImport, + Block, + ParachainBlockImport< + Block, + SlotBasedBlockImport< + Block, + Arc>, + ParachainClient, + >, + >, CIDP, ParachainClient, ParachainBackend, @@ -252,33 +285,31 @@ where CHP, Proposer, CS, + Spawner, >, - task_manager: &TaskManager, ) where CIDP: CreateInherentDataProviders + 'static, CIDP::InherentDataProviders: Send, CHP: cumulus_client_consensus_common::ValidationCodeHashProvider + Send + 'static, Proposer: ProposerInterface + Send + Sync + 'static, CS: CollatorServiceInterface + Send + Sync + Clone + 'static, + Spawner: SpawnNamed, { - let (collation_future, block_builder_future) = - 
slot_based::run::::Pair, _, _, _, _, _, _, _, _>(params); - - task_manager.spawn_essential_handle().spawn( - "collation-task", - Some("parachain-block-authoring"), - collation_future, - ); - task_manager.spawn_essential_handle().spawn( - "block-builder-task", - Some("parachain-block-authoring"), - block_builder_future, - ); + slot_based::run::::Pair, _, _, _, _, _, _, _, _, _>(params); } } -impl, RuntimeApi, AuraId> StartConsensus - for StartSlotBasedAuraConsensus +impl, RuntimeApi, AuraId> + StartConsensus< + Block, + RuntimeApi, + SlotBasedBlockImport< + Block, + Arc>, + ParachainClient, + >, + SlotBasedBlockImportHandle, + > for StartSlotBasedAuraConsensus where RuntimeApi: ConstructNodeRuntimeApi>, RuntimeApi::RuntimeApi: AuraRuntimeApi, @@ -286,7 +317,14 @@ where { fn start_consensus( client: Arc>, - block_import: ParachainBlockImport, + block_import: ParachainBlockImport< + Block, + SlotBasedBlockImport< + Block, + Arc>, + ParachainClient, + >, + >, prometheus_registry: Option<&Registry>, telemetry: Option, task_manager: &TaskManager, @@ -300,6 +338,7 @@ where announce_block: Arc>) + Send + Sync>, backend: Arc>, _node_extra_args: NodeExtraArgs, + block_import_handle: SlotBasedBlockImportHandle, ) -> Result<(), Error> { let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( task_manager.spawn_handle(), @@ -335,16 +374,39 @@ where authoring_duration: Duration::from_millis(2000), reinitialize: false, slot_drift: Duration::from_secs(1), + block_import_handle, + spawner: task_manager.spawn_handle(), }; // We have a separate function only to be able to use `docify::export` on this piece of // code. - Self::launch_slot_based_collator(params, task_manager); + Self::launch_slot_based_collator(params); Ok(()) } } +impl, RuntimeApi, AuraId> InitBlockImport + for StartSlotBasedAuraConsensus +where + RuntimeApi: ConstructNodeRuntimeApi>, + RuntimeApi::RuntimeApi: AuraRuntimeApi, + AuraId: AuraIdT + Sync, +{ + type BlockImport = SlotBasedBlockImport< + Block, + Arc>, + ParachainClient, + >; + type BlockImportAuxiliaryData = SlotBasedBlockImportHandle; + + fn init_block_import( + client: Arc>, + ) -> sc_service::error::Result<(Self::BlockImport, Self::BlockImportAuxiliaryData)> { + Ok(SlotBasedBlockImport::new(client.clone(), client)) + } +} + /// Wait for the Aura runtime API to appear on chain. /// This is useful for chains that started out without Aura. Components that /// are depending on Aura functionality will wait until Aura appears in the runtime. 
@@ -373,7 +435,8 @@ pub(crate) struct StartLookaheadAuraConsensus( PhantomData<(Block, RuntimeApi, AuraId)>, ); -impl, RuntimeApi, AuraId> StartConsensus +impl, RuntimeApi, AuraId> + StartConsensus>, ()> for StartLookaheadAuraConsensus where RuntimeApi: ConstructNodeRuntimeApi>, @@ -382,7 +445,7 @@ where { fn start_consensus( client: Arc>, - block_import: ParachainBlockImport, + block_import: ParachainBlockImport>>, prometheus_registry: Option<&Registry>, telemetry: Option, task_manager: &TaskManager, @@ -396,6 +459,7 @@ where announce_block: Arc>) + Send + Sync>, backend: Arc>, node_extra_args: NodeExtraArgs, + _: (), ) -> Result<(), Error> { let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( task_manager.spawn_handle(), diff --git a/cumulus/polkadot-omni-node/lib/src/nodes/manual_seal.rs b/cumulus/polkadot-omni-node/lib/src/nodes/manual_seal.rs index 7e36ce735af3..f33865ad45cd 100644 --- a/cumulus/polkadot-omni-node/lib/src/nodes/manual_seal.rs +++ b/cumulus/polkadot-omni-node/lib/src/nodes/manual_seal.rs @@ -16,28 +16,37 @@ use crate::common::{ rpc::BuildRpcExtensions as BuildRpcExtensionsT, - spec::{BaseNodeSpec, BuildImportQueue, NodeSpec as NodeSpecT}, + spec::{BaseNodeSpec, BuildImportQueue, ClientBlockImport, NodeSpec as NodeSpecT}, types::{Hash, ParachainBlockImport, ParachainClient}, }; use codec::Encode; use cumulus_client_parachain_inherent::{MockValidationDataInherentDataProvider, MockXcmConfig}; -use cumulus_primitives_core::ParaId; +use cumulus_primitives_core::{CollectCollationInfo, ParaId}; +use polkadot_primitives::UpgradeGoAhead; use sc_consensus::{DefaultImportQueue, LongestChain}; use sc_consensus_manual_seal::rpc::{ManualSeal, ManualSealApiServer}; use sc_network::NetworkBackend; use sc_service::{Configuration, PartialComponents, TaskManager}; use sc_telemetry::TelemetryHandle; +use sp_api::ProvideRuntimeApi; use sp_runtime::traits::Header; use std::{marker::PhantomData, sync::Arc}; pub struct ManualSealNode(PhantomData); -impl BuildImportQueue - for ManualSealNode +impl + BuildImportQueue< + NodeSpec::Block, + NodeSpec::RuntimeApi, + Arc>, + > for ManualSealNode { fn build_import_queue( client: Arc>, - _block_import: ParachainBlockImport, + _block_import: ParachainBlockImport< + NodeSpec::Block, + Arc>, + >, config: &Configuration, _telemetry_handle: Option, task_manager: &TaskManager, @@ -54,6 +63,7 @@ impl BaseNodeSpec for ManualSealNode { type Block = NodeSpec::Block; type RuntimeApi = NodeSpec::RuntimeApi; type BuildImportQueue = Self; + type InitBlockImport = ClientBlockImport; } impl ManualSealNode { @@ -78,7 +88,7 @@ impl ManualSealNode { keystore_container, select_chain: _, transaction_pool, - other: (_, mut telemetry, _), + other: (_, mut telemetry, _, _), } = Self::new_partial(&config)?; let select_chain = LongestChain::new(backend.clone()); @@ -147,6 +157,18 @@ impl ManualSealNode { .header(block) .expect("Header lookup should succeed") .expect("Header passed in as parent should be present in backend."); + + let should_send_go_ahead = match client_for_cidp + .runtime_api() + .collect_collation_info(block, ¤t_para_head) + { + Ok(info) => info.new_validation_code.is_some(), + Err(e) => { + log::error!("Failed to collect collation info: {:?}", e); + false + }, + }; + let current_para_block_head = Some(polkadot_primitives::HeadData(current_para_head.encode())); let client_for_xcm = client_for_cidp.clone(); @@ -169,6 +191,12 @@ impl ManualSealNode { raw_downward_messages: vec![], raw_horizontal_messages: vec![], 
additional_key_values: None, + upgrade_go_ahead: should_send_go_ahead.then(|| { + log::info!( + "Detected pending validation code, sending go-ahead signal." + ); + UpgradeGoAhead::GoAhead + }), }; Ok(( // This is intentional, as the runtime that we expect to run against this diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml index 5520126d0742..3bfb79610448 100644 --- a/cumulus/polkadot-parachain/Cargo.toml +++ b/cumulus/polkadot-parachain/Cargo.toml @@ -6,6 +6,8 @@ edition.workspace = true build = "build.rs" description = "Runs a polkadot parachain node" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -76,6 +78,7 @@ runtime-benchmarks = [ "people-rococo-runtime/runtime-benchmarks", "people-westend-runtime/runtime-benchmarks", "rococo-parachain-runtime/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ "polkadot-omni-node-lib/try-runtime", diff --git a/cumulus/primitives/aura/Cargo.toml b/cumulus/primitives/aura/Cargo.toml index 185b2d40833f..715ce3e1a03e 100644 --- a/cumulus/primitives/aura/Cargo.toml +++ b/cumulus/primitives/aura/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license = "Apache-2.0" description = "Core primitives for Aura in Cumulus" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/primitives/core/Cargo.toml b/cumulus/primitives/core/Cargo.toml index 533d368d3b00..307860897aec 100644 --- a/cumulus/primitives/core/Cargo.toml +++ b/cumulus/primitives/core/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license = "Apache-2.0" description = "Cumulus related core primitive types and traits" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -41,4 +43,5 @@ runtime-benchmarks = [ "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-primitives/runtime-benchmarks", "sp-runtime/runtime-benchmarks", + "xcm/runtime-benchmarks", ] diff --git a/cumulus/primitives/parachain-inherent/Cargo.toml b/cumulus/primitives/parachain-inherent/Cargo.toml index a4271d3fd9cc..2ff990b8d514 100644 --- a/cumulus/primitives/parachain-inherent/Cargo.toml +++ b/cumulus/primitives/parachain-inherent/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Inherent that needs to be present in every parachain block. Contains messages and a relay chain storage-proof." license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/primitives/proof-size-hostfunction/Cargo.toml b/cumulus/primitives/proof-size-hostfunction/Cargo.toml index e61c865d05fb..6e8168091892 100644 --- a/cumulus/primitives/proof-size-hostfunction/Cargo.toml +++ b/cumulus/primitives/proof-size-hostfunction/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Hostfunction exposing storage proof size to the runtime." license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/primitives/storage-weight-reclaim/Cargo.toml b/cumulus/primitives/storage-weight-reclaim/Cargo.toml index e1ae6743335a..3c358bc25edb 100644 --- a/cumulus/primitives/storage-weight-reclaim/Cargo.toml +++ b/cumulus/primitives/storage-weight-reclaim/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Utilities to reclaim storage weight." 
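Note on the manual-seal hunk above: under manual seal there is no relay chain to issue the upgrade go-ahead signal, so the mock inherent data provider has to synthesize it; without this, a runtime that scheduled a code upgrade would wait for a signal that never arrives. A minimal sketch of the decision logic, using a stand-in CollationInfo type that models only the inspected field (the real type lives in cumulus-primitives-core):

// Sketch (assumed types): derive the go-ahead signal from pending validation code.
struct CollationInfo {
    new_validation_code: Option<Vec<u8>>,
}

enum UpgradeGoAhead {
    GoAhead,
}

fn mock_upgrade_go_ahead(info: Result<CollationInfo, String>) -> Option<UpgradeGoAhead> {
    let pending_upgrade = match info {
        Ok(info) => info.new_validation_code.is_some(),
        Err(e) => {
            eprintln!("Failed to collect collation info: {:?}", e);
            false
        },
    };
    // Emit the signal only when a code upgrade is actually pending.
    pending_upgrade.then_some(UpgradeGoAhead::GoAhead)
}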
license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/primitives/timestamp/Cargo.toml b/cumulus/primitives/timestamp/Cargo.toml index cb328e2f2cc6..70cb3e607b98 100644 --- a/cumulus/primitives/timestamp/Cargo.toml +++ b/cumulus/primitives/timestamp/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Provides timestamp related functionality for parachains." license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/primitives/utility/Cargo.toml b/cumulus/primitives/utility/Cargo.toml index 2ca8b82001d5..f26e34a29509 100644 --- a/cumulus/primitives/utility/Cargo.toml +++ b/cumulus/primitives/utility/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license = "Apache-2.0" description = "Helper datatypes for Cumulus" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -50,4 +52,5 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm/runtime-benchmarks", ] diff --git a/cumulus/test/client/src/lib.rs b/cumulus/test/client/src/lib.rs index 863a8fa93f6f..26cf02b3dea9 100644 --- a/cumulus/test/client/src/lib.rs +++ b/cumulus/test/client/src/lib.rs @@ -167,7 +167,7 @@ pub fn generate_extrinsic_with_pair( /// Generate an extrinsic from the provided function call, origin and [`Client`]. pub fn generate_extrinsic( client: &Client, - origin: sp_keyring::AccountKeyring, + origin: sp_keyring::Sr25519Keyring, function: impl Into, ) -> UncheckedExtrinsic { generate_extrinsic_with_pair(client, origin.into(), function, None) @@ -176,8 +176,8 @@ pub fn generate_extrinsic( /// Transfer some token from one account to another using a provided test [`Client`]. pub fn transfer( client: &Client, - origin: sp_keyring::AccountKeyring, - dest: sp_keyring::AccountKeyring, + origin: sp_keyring::Sr25519Keyring, + dest: sp_keyring::Sr25519Keyring, value: Balance, ) -> UncheckedExtrinsic { let function = RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { diff --git a/cumulus/test/relay-sproof-builder/Cargo.toml b/cumulus/test/relay-sproof-builder/Cargo.toml index e266b5807081..c1efa141a45d 100644 --- a/cumulus/test/relay-sproof-builder/Cargo.toml +++ b/cumulus/test/relay-sproof-builder/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license = "Apache-2.0" description = "Mocked relay state proof builder for testing Cumulus." +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index 9234442d399c..2c13d20333a7 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -27,7 +27,10 @@ use cumulus_client_collator::service::CollatorService; use cumulus_client_consensus_aura::{ collators::{ lookahead::{self as aura, Params as AuraParams}, - slot_based::{self as slot_based, Params as SlotBasedParams}, + slot_based::{ + self as slot_based, Params as SlotBasedParams, SlotBasedBlockImport, + SlotBasedBlockImportHandle, + }, }, ImportQueueParams, }; @@ -131,7 +134,8 @@ pub type Client = TFullClient; /// The block-import type being used by the test service. 
-pub type ParachainBlockImport = TParachainBlockImport, Backend>; +pub type ParachainBlockImport = + TParachainBlockImport, Client>, Backend>; /// Transaction pool type used by the test service pub type TransactionPool = Arc>; @@ -184,7 +188,7 @@ pub type Service = PartialComponents< (), sc_consensus::import_queue::BasicQueue, sc_transaction_pool::TransactionPoolHandle, - ParachainBlockImport, + (ParachainBlockImport, SlotBasedBlockImportHandle), >; /// Starts a `ServiceBuilder` for a full service. @@ -217,7 +221,9 @@ pub fn new_partial( )?; let client = Arc::new(client); - let block_import = ParachainBlockImport::new(client.clone(), backend.clone()); + let (block_import, slot_based_handle) = + SlotBasedBlockImport::new(client.clone(), client.clone()); + let block_import = ParachainBlockImport::new(block_import, backend.clone()); let transaction_pool = Arc::from( sc_transaction_pool::Builder::new( @@ -260,7 +266,7 @@ pub fn new_partial( task_manager, transaction_pool, select_chain: (), - other: block_import, + other: (block_import, slot_based_handle), }; Ok(params) @@ -349,7 +355,8 @@ where let client = params.client.clone(); let backend = params.backend.clone(); - let block_import = params.other; + let block_import = params.other.0; + let slot_based_handle = params.other.1; let relay_chain_interface = build_relay_chain_interface( relay_chain_config, parachain_config.prometheus_registry(), @@ -497,20 +504,11 @@ where authoring_duration: Duration::from_millis(2000), reinitialize: false, slot_drift: Duration::from_secs(1), + block_import_handle: slot_based_handle, + spawner: task_manager.spawn_handle(), }; - let (collation_future, block_builder_future) = - slot_based::run::(params); - task_manager.spawn_essential_handle().spawn( - "collation-task", - None, - collation_future, - ); - task_manager.spawn_essential_handle().spawn( - "block-builder-task", - None, - block_builder_future, - ); + slot_based::run::(params); } else { tracing::info!(target: LOG_TARGET, "Starting block authoring with lookahead collator."); let params = AuraParams { diff --git a/cumulus/xcm/xcm-emulator/Cargo.toml b/cumulus/xcm/xcm-emulator/Cargo.toml index 8598481fae76..d0c637d64d01 100644 --- a/cumulus/xcm/xcm-emulator/Cargo.toml +++ b/cumulus/xcm/xcm-emulator/Cargo.toml @@ -5,6 +5,8 @@ version = "0.5.0" authors.workspace = true edition.workspace = true license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/docs/contributor/container.md b/docs/contributor/container.md index ec51b8b9d7cc..e387f568d7b5 100644 --- a/docs/contributor/container.md +++ b/docs/contributor/container.md @@ -24,7 +24,7 @@ The command below allows building a Linux binary without having to even install docker run --rm -it \ -w /polkadot-sdk \ -v $(pwd):/polkadot-sdk \ - docker.io/paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v20240408 \ + docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-11-19-v202411281558 \ cargo build --release --locked -p polkadot-parachain-bin --bin polkadot-parachain sudo chown -R $(id -u):$(id -g) target/ ``` diff --git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml index 0c39367eeed3..0e4a5c001382 100644 --- a/docs/sdk/Cargo.toml +++ b/docs/sdk/Cargo.toml @@ -138,4 +138,5 @@ first-pallet = { workspace = true, default-features = true } [dev-dependencies] assert_cmd = "2.0.14" +cmd_lib = { workspace = true } rand = "0.8" diff --git a/docs/sdk/packages/guides/first-runtime/src/lib.rs b/docs/sdk/packages/guides/first-runtime/src/lib.rs index 
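The test-service change above mirrors the omni-node library change earlier in this diff: slot_based::run now receives the spawner (and the block import handle) through its params and spawns its own collation and block-building tasks, so call sites no longer get futures back to spawn themselves. A sketch of the plausible internal shape, assuming the SpawnNamed trait from sp-core (an illustration of the design choice, not the actual cumulus implementation):

use futures::future::BoxFuture;
use sp_core::traits::SpawnNamed;

// Sketch: the collator owns task spawning, removing the per-call-site
// spawn boilerplate that this diff deletes.
fn spawn_collator_tasks(
    spawner: &impl SpawnNamed,
    collation_task: BoxFuture<'static, ()>,
    block_builder_task: BoxFuture<'static, ()>,
) {
    spawner.spawn("collation-task", Some("parachain-block-authoring"), collation_task);
    spawner.spawn("block-builder-task", Some("parachain-block-authoring"), block_builder_task);
}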
61ca550c8750..2ab060c8c43f 100644 --- a/docs/sdk/packages/guides/first-runtime/src/lib.rs +++ b/docs/sdk/packages/guides/first-runtime/src/lib.rs @@ -139,11 +139,11 @@ pub mod genesis_config_presets { let endowment = >::get().max(1) * 1000; build_struct_json_patch!(RuntimeGenesisConfig { balances: BalancesConfig { - balances: AccountKeyring::iter() + balances: Sr25519Keyring::iter() .map(|a| (a.to_account_id(), endowment)) .collect::>(), }, - sudo: SudoConfig { key: Some(AccountKeyring::Alice.to_account_id()) }, + sudo: SudoConfig { key: Some(Sr25519Keyring::Alice.to_account_id()) }, }) } diff --git a/docs/sdk/src/guides/your_first_node.rs b/docs/sdk/src/guides/your_first_node.rs index da37c11c206f..3c782e4793ba 100644 --- a/docs/sdk/src/guides/your_first_node.rs +++ b/docs/sdk/src/guides/your_first_node.rs @@ -103,6 +103,7 @@ #[cfg(test)] mod tests { use assert_cmd::Command; + use cmd_lib::*; use rand::Rng; use sc_chain_spec::{DEV_RUNTIME_PRESET, LOCAL_TESTNET_RUNTIME_PRESET}; use sp_genesis_builder::PresetId; @@ -173,13 +174,10 @@ mod tests { println!("Building polkadot-sdk-docs-first-runtime..."); #[docify::export_content] fn build_runtime() { - Command::new("cargo") - .arg("build") - .arg("--release") - .arg("-p") - .arg(FIRST_RUNTIME) - .assert() - .success(); + run_cmd!( + cargo build --release -p $FIRST_RUNTIME + ) + .expect("Failed to run command"); } build_runtime() } @@ -274,14 +272,10 @@ mod tests { let chain_spec_builder = find_release_binary(&CHAIN_SPEC_BUILDER).unwrap(); let runtime_path = find_wasm(PARA_RUNTIME).unwrap(); let output = "/tmp/demo-chain-spec.json"; - Command::new(chain_spec_builder) - .args(["-c", output]) - .arg("create") - .args(["--para-id", "1000", "--relay-chain", "dontcare"]) - .args(["-r", runtime_path.to_str().unwrap()]) - .args(["named-preset", "development"]) - .assert() - .success(); + let runtime_str = runtime_path.to_str().unwrap(); + run_cmd!( + $chain_spec_builder -c $output create --para-id 1000 --relay-chain dontcare -r $runtime_str named-preset development + ).expect("Failed to run command"); std::fs::remove_file(output).unwrap(); } build_para_chain_spec_works(); diff --git a/docs/sdk/src/reference_docs/chain_spec_genesis.rs b/docs/sdk/src/reference_docs/chain_spec_genesis.rs index b7a0a648d0cf..d5cc482711ad 100644 --- a/docs/sdk/src/reference_docs/chain_spec_genesis.rs +++ b/docs/sdk/src/reference_docs/chain_spec_genesis.rs @@ -174,13 +174,13 @@ //! ``` //! Here are some examples in the form of rust tests: //! ## Listing available preset names: -#![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs", list_presets)] +#![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs", cmd_list_presets)] //! ## Displaying preset with given name -#![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs", get_preset)] +#![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs", cmd_get_preset)] //! ## Building a solo chain-spec (the default) using given preset -#![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs", generate_chain_spec)] +#![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs", cmd_generate_chain_spec)] //! 
## Building a parachain chain-spec using given preset -#![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs", generate_para_chain_spec)] +#![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs", cmd_generate_para_chain_spec)] //! //! [`RuntimeGenesisConfig`]: //! chain_spec_guide_runtime::runtime::RuntimeGenesisConfig diff --git a/docs/sdk/src/reference_docs/chain_spec_runtime/Cargo.toml b/docs/sdk/src/reference_docs/chain_spec_runtime/Cargo.toml index 07c0342f5fbe..307e2daa38c6 100644 --- a/docs/sdk/src/reference_docs/chain_spec_runtime/Cargo.toml +++ b/docs/sdk/src/reference_docs/chain_spec_runtime/Cargo.toml @@ -42,6 +42,7 @@ substrate-wasm-builder = { optional = true, workspace = true, default-features = [dev-dependencies] chain-spec-builder = { workspace = true, default-features = true } +cmd_lib = { workspace = true } sc-chain-spec = { workspace = true, default-features = true } [features] diff --git a/docs/sdk/src/reference_docs/chain_spec_runtime/src/presets.rs b/docs/sdk/src/reference_docs/chain_spec_runtime/src/presets.rs index 5918f2b8ccd5..5432d37e907d 100644 --- a/docs/sdk/src/reference_docs/chain_spec_runtime/src/presets.rs +++ b/docs/sdk/src/reference_docs/chain_spec_runtime/src/presets.rs @@ -25,7 +25,7 @@ use alloc::vec; use frame_support::build_struct_json_patch; use serde_json::{json, to_string, Value}; use sp_application_crypto::Ss58Codec; -use sp_keyring::AccountKeyring; +use sp_keyring::Sr25519Keyring; /// A demo preset with strings only. pub const PRESET_1: &str = "preset_1"; @@ -70,7 +70,7 @@ fn preset_2() -> Value { some_integer: 200, some_enum: FooEnum::Data2(SomeFooData2 { values: vec![0x0c, 0x10] }) }, - bar: BarConfig { initial_account: Some(AccountKeyring::Ferdie.public().into()) }, + bar: BarConfig { initial_account: Some(Sr25519Keyring::Ferdie.public().into()) }, }) } @@ -80,7 +80,7 @@ fn preset_2() -> Value { fn preset_3() -> Value { json!({ "bar": { - "initialAccount": AccountKeyring::Alice.public().to_ss58check(), + "initialAccount": Sr25519Keyring::Alice.public().to_ss58check(), }, "foo": { "someEnum": FooEnum::Data1( diff --git a/docs/sdk/src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs b/docs/sdk/src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs index c2fe5a6727e6..b773af24de80 100644 --- a/docs/sdk/src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs +++ b/docs/sdk/src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs @@ -1,194 +1,192 @@ +use cmd_lib::*; use serde_json::{json, Value}; -use std::{process::Command, str}; +use std::str; -const WASM_FILE_PATH: &str = - "../../../../../target/release/wbuild/chain-spec-guide-runtime/chain_spec_guide_runtime.wasm"; +fn wasm_file_path() -> &'static str { + chain_spec_guide_runtime::runtime::WASM_BINARY_PATH + .expect("chain_spec_guide_runtime wasm should exist. qed") +} const CHAIN_SPEC_BUILDER_PATH: &str = "../../../../../target/release/chain-spec-builder"; +macro_rules! bash( + ( chain-spec-builder $($a:tt)* ) => {{ + let path = get_chain_spec_builder_path(); + spawn_with_output!( + $path $($a)* + ) + .expect("a process running. qed") + .wait_with_output() + .expect("to get output. qed.") + + }} +); + fn get_chain_spec_builder_path() -> &'static str { - // dev-dependencies do not build binary. 
So let's do the naive work-around here: - let _ = std::process::Command::new("cargo") - .arg("build") - .arg("--release") - .arg("-p") - .arg("staging-chain-spec-builder") - .arg("--bin") - .arg("chain-spec-builder") - .status() - .expect("Failed to execute command"); + run_cmd!( + cargo build --release -p staging-chain-spec-builder --bin chain-spec-builder + ) + .expect("Failed to execute command"); CHAIN_SPEC_BUILDER_PATH } +#[docify::export_content] +fn cmd_list_presets(runtime_path: &str) -> String { + bash!( + chain-spec-builder list-presets -r $runtime_path + ) +} + #[test] -#[docify::export] fn list_presets() { - let output = Command::new(get_chain_spec_builder_path()) - .arg("list-presets") - .arg("-r") - .arg(WASM_FILE_PATH) - .output() - .expect("Failed to execute command"); - - let output: serde_json::Value = serde_json::from_slice(&output.stdout).unwrap(); + let output: serde_json::Value = + serde_json::from_slice(cmd_list_presets(wasm_file_path()).as_bytes()).unwrap(); + assert_eq!( + output, + json!({ + "presets":[ + "preset_1", + "preset_2", + "preset_3", + "preset_4", + "preset_invalid" + ] + }), + "Output did not match expected" + ); +} - let expected_output = json!({ - "presets":[ - "preset_1", - "preset_2", - "preset_3", - "preset_4", - "preset_invalid" - ] - }); - assert_eq!(output, expected_output, "Output did not match expected"); +#[docify::export_content] +fn cmd_get_preset(runtime_path: &str) -> String { + bash!( + chain-spec-builder display-preset -r $runtime_path -p preset_2 + ) } #[test] -#[docify::export] fn get_preset() { - let output = Command::new(get_chain_spec_builder_path()) - .arg("display-preset") - .arg("-r") - .arg(WASM_FILE_PATH) - .arg("-p") - .arg("preset_2") - .output() - .expect("Failed to execute command"); - - let output: serde_json::Value = serde_json::from_slice(&output.stdout).unwrap(); - - //note: copy of chain_spec_guide_runtime::preset_2 - let expected_output = json!({ - "bar": { - "initialAccount": "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL", - }, - "foo": { - "someEnum": { - "Data2": { - "values": "0x0c10" - } + let output: serde_json::Value = + serde_json::from_slice(cmd_get_preset(wasm_file_path()).as_bytes()).unwrap(); + assert_eq!( + output, + json!({ + "bar": { + "initialAccount": "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL", + }, + "foo": { + "someEnum": { + "Data2": { + "values": "0x0c10" + } + }, + "someInteger": 200 }, - "someInteger": 200 - }, - }); - assert_eq!(output, expected_output, "Output did not match expected"); + }), + "Output did not match expected" + ); +} + +#[docify::export_content] +fn cmd_generate_chain_spec(runtime_path: &str) -> String { + bash!( + chain-spec-builder -c /dev/stdout create -r $runtime_path named-preset preset_2 + ) } #[test] -#[docify::export] fn generate_chain_spec() { - let output = Command::new(get_chain_spec_builder_path()) - .arg("-c") - .arg("/dev/stdout") - .arg("create") - .arg("-r") - .arg(WASM_FILE_PATH) - .arg("named-preset") - .arg("preset_2") - .output() - .expect("Failed to execute command"); - - let mut output: serde_json::Value = serde_json::from_slice(&output.stdout).unwrap(); - - //remove code field for better readability + let mut output: serde_json::Value = + serde_json::from_slice(cmd_generate_chain_spec(wasm_file_path()).as_bytes()).unwrap(); if let Some(code) = output["genesis"]["runtimeGenesis"].as_object_mut().unwrap().get_mut("code") { *code = Value::String("0x123".to_string()); } - - let expected_output = json!({ - "name": "Custom", - "id": "custom", - 
"chainType": "Live", - "bootNodes": [], - "telemetryEndpoints": null, - "protocolId": null, - "properties": { "tokenDecimals": 12, "tokenSymbol": "UNIT" }, - "codeSubstitutes": {}, - "genesis": { - "runtimeGenesis": { - "code": "0x123", - "patch": { - "bar": { - "initialAccount": "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL" - }, - "foo": { - "someEnum": { - "Data2": { - "values": "0x0c10" + assert_eq!( + output, + json!({ + "name": "Custom", + "id": "custom", + "chainType": "Live", + "bootNodes": [], + "telemetryEndpoints": null, + "protocolId": null, + "properties": { "tokenDecimals": 12, "tokenSymbol": "UNIT" }, + "codeSubstitutes": {}, + "genesis": { + "runtimeGenesis": { + "code": "0x123", + "patch": { + "bar": { + "initialAccount": "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL" + }, + "foo": { + "someEnum": { + "Data2": { + "values": "0x0c10" + } + }, + "someInteger": 200 } - }, - "someInteger": 200 + } } } - } - } - }); - assert_eq!(output, expected_output, "Output did not match expected"); + }), + "Output did not match expected" + ); +} + +#[docify::export_content] +fn cmd_generate_para_chain_spec(runtime_path: &str) -> String { + bash!( + chain-spec-builder -c /dev/stdout create -c polkadot -p 1000 -r $runtime_path named-preset preset_2 + ) } #[test] -#[docify::export] fn generate_para_chain_spec() { - let output = Command::new(get_chain_spec_builder_path()) - .arg("-c") - .arg("/dev/stdout") - .arg("create") - .arg("-c") - .arg("polkadot") - .arg("-p") - .arg("1000") - .arg("-r") - .arg(WASM_FILE_PATH) - .arg("named-preset") - .arg("preset_2") - .output() - .expect("Failed to execute command"); - - let mut output: serde_json::Value = serde_json::from_slice(&output.stdout).unwrap(); - - //remove code field for better readability + let mut output: serde_json::Value = + serde_json::from_slice(cmd_generate_para_chain_spec(wasm_file_path()).as_bytes()).unwrap(); if let Some(code) = output["genesis"]["runtimeGenesis"].as_object_mut().unwrap().get_mut("code") { *code = Value::String("0x123".to_string()); } - - let expected_output = json!({ - "name": "Custom", - "id": "custom", - "chainType": "Live", - "bootNodes": [], - "telemetryEndpoints": null, - "protocolId": null, - "relay_chain": "polkadot", - "para_id": 1000, - "properties": { "tokenDecimals": 12, "tokenSymbol": "UNIT" }, - "codeSubstitutes": {}, - "genesis": { - "runtimeGenesis": { - "code": "0x123", - "patch": { - "bar": { - "initialAccount": "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL" - }, - "foo": { - "someEnum": { - "Data2": { - "values": "0x0c10" - } + assert_eq!( + output, + json!({ + "name": "Custom", + "id": "custom", + "chainType": "Live", + "bootNodes": [], + "telemetryEndpoints": null, + "protocolId": null, + "relay_chain": "polkadot", + "para_id": 1000, + "properties": { "tokenDecimals": 12, "tokenSymbol": "UNIT" }, + "codeSubstitutes": {}, + "genesis": { + "runtimeGenesis": { + "code": "0x123", + "patch": { + "bar": { + "initialAccount": "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL" }, - "someInteger": 200 + "foo": { + "someEnum": { + "Data2": { + "values": "0x0c10" + } + }, + "someInteger": 200 + } } } - } - } - }); - assert_eq!(output, expected_output, "Output did not match expected"); + }}), + "Output did not match expected" + ); } #[test] -#[docify::export] +#[docify::export_content] fn preset_4_json() { assert_eq!( chain_spec_guide_runtime::presets::preset_4(), diff --git a/docs/sdk/src/reference_docs/omni_node.rs b/docs/sdk/src/reference_docs/omni_node.rs index 44d63704a458..150755fb29a2 
100644
--- a/docs/sdk/src/reference_docs/omni_node.rs
+++ b/docs/sdk/src/reference_docs/omni_node.rs
@@ -177,9 +177,25 @@
 //! [This](https://github.com/paritytech/polkadot-sdk/issues/5565) future improvement to OmniNode
 //! aims to make such checks automatic.
 //!
+//! ### Runtime conventions
+//!
+//! The Omni Node needs to make some assumptions about the runtime. During startup, the node fetches
+//! the runtime metadata and asserts that the runtime represents a compatible parachain.
+//! The checks are best effort and will generate warning-level logs in the Omni Node log file on
+//! failure.
+//!
+//! The list of checks may evolve in the future; for now only a few rules are implemented:
+//! * runtimes must define a type for [`cumulus-pallet-parachain-system`], which is recommended to
+//!   be named `ParachainSystem`.
+//! * runtimes must define a type for the [`frame-system`] pallet, which is recommended to be named
+//!   `System`. The configured [`block number`] here will be used by Omni Node to configure AURA
+//!   accordingly.
 //!
 //! [`templates`]: crate::polkadot_sdk::templates
 //! [`parachain-template`]: https://github.com/paritytech/polkadot-sdk-parachain-template
 //! [`--dev-block-time`]: polkadot_omni_node_lib::cli::Cli::dev_block_time
 //! [`polkadot-omni-node`]: https://crates.io/crates/polkadot-omni-node
 //! [`chain-spec-builder`]: https://crates.io/crates/staging-chain-spec-builder
+//! [`cumulus-pallet-parachain-system`]: https://docs.rs/cumulus-pallet-parachain-system/latest/cumulus_pallet_parachain_system/
+//! [`frame-system`]: https://docs.rs/frame-system/latest/frame_system/
+//! [`block number`]: https://docs.rs/frame-system/latest/frame_system/pallet/storage_types/struct.Number.html
diff --git a/polkadot/Cargo.toml b/polkadot/Cargo.toml
index 3a939464868f..101caac0e313 100644
--- a/polkadot/Cargo.toml
+++ b/polkadot/Cargo.toml
@@ -20,6 +20,8 @@ authors.workspace = true
 edition.workspace = true
 version = "6.0.0"
 default-run = "polkadot"
+homepage.workspace = true
+repository.workspace = true
 
 [lints]
 workspace = true
diff --git a/polkadot/cli/Cargo.toml b/polkadot/cli/Cargo.toml
index da37f6062c57..3eff525b7b1e 100644
--- a/polkadot/cli/Cargo.toml
+++ b/polkadot/cli/Cargo.toml
@@ -5,6 +5,8 @@ version = "7.0.0"
 authors.workspace = true
 edition.workspace = true
 license.workspace = true
+homepage.workspace = true
+repository.workspace = true
 
 [lints]
 workspace = true
diff --git a/polkadot/core-primitives/Cargo.toml b/polkadot/core-primitives/Cargo.toml
index 42ca27953738..33869f216f78 100644
--- a/polkadot/core-primitives/Cargo.toml
+++ b/polkadot/core-primitives/Cargo.toml
@@ -5,6 +5,8 @@ description = "Core Polkadot types used by Relay Chains and parachains."
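To make the Omni Node runtime conventions documented above concrete, here is a minimal sketch of a conforming construct_runtime! declaration (hypothetical runtime; only the two pallet entries and their recommended names come from the documentation above):

frame_support::construct_runtime!(
    pub enum Runtime {
        // Omni Node reads the block number configured here from the metadata
        // and configures AURA accordingly.
        System: frame_system,
        // Presence of this pallet marks the runtime as a compatible parachain.
        ParachainSystem: cumulus_pallet_parachain_system,
        // ...remaining pallets elided...
    }
);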
authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/erasure-coding/Cargo.toml b/polkadot/erasure-coding/Cargo.toml index 969742c5bb0a..528b955c4db3 100644 --- a/polkadot/erasure-coding/Cargo.toml +++ b/polkadot/erasure-coding/Cargo.toml @@ -5,6 +5,8 @@ description = "Erasure coding used for Polkadot's availability system" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/grafana/README.md b/polkadot/grafana/README.md index e909fdd29a75..0ecb0b70515b 100644 --- a/polkadot/grafana/README.md +++ b/polkadot/grafana/README.md @@ -90,4 +90,4 @@ and issue statement or initiate dispute. - **Assignment delay tranches**. Approval voting is designed such that validators assigned to check a specific candidate are split up into equal delay tranches (0.5 seconds each). All validators checks are ordered by the delay tranche index. Early tranches of validators have the opportunity to check the candidate first before later tranches -that act as as backups in case of no shows. +that act as backups in case of no shows. diff --git a/polkadot/grafana/parachains/status.json b/polkadot/grafana/parachains/status.json index 5942cbdf4479..22250967848d 100644 --- a/polkadot/grafana/parachains/status.json +++ b/polkadot/grafana/parachains/status.json @@ -1405,7 +1405,7 @@ "type": "prometheus", "uid": "$data_source" }, - "description": "Approval voting requires that validators which are assigned to check a specific \ncandidate are split up into delay tranches (0.5s each). Then, all validators checks are ordered by the delay \ntranche index. Early tranches of validators will check the candidate first and later tranches act as as backups in case of no shows.", + "description": "Approval voting requires that validators which are assigned to check a specific \ncandidate are split up into delay tranches (0.5s each). Then, all validators checks are ordered by the delay \ntranche index. Early tranches of validators will check the candidate first and later tranches act as backups in case of no shows.", "gridPos": { "h": 9, "w": 18, diff --git a/polkadot/node/collation-generation/Cargo.toml b/polkadot/node/collation-generation/Cargo.toml index 777458673f5b..c1716e2e6eb8 100644 --- a/polkadot/node/collation-generation/Cargo.toml +++ b/polkadot/node/collation-generation/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Collator-side subsystem that handles incoming candidate submissions from the parachain." 
+homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/approval-voting-parallel/Cargo.toml b/polkadot/node/core/approval-voting-parallel/Cargo.toml index 3a98cce80e92..995687fb4c11 100644 --- a/polkadot/node/core/approval-voting-parallel/Cargo.toml +++ b/polkadot/node/core/approval-voting-parallel/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Approval Voting Subsystem running approval work in parallel" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/approval-voting/Cargo.toml b/polkadot/node/core/approval-voting/Cargo.toml index f9754d2babc9..80f5dcb7f318 100644 --- a/polkadot/node/core/approval-voting/Cargo.toml +++ b/polkadot/node/core/approval-voting/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Approval Voting Subsystem of the Polkadot node" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/approval-voting/src/approval_db/v3/tests.rs b/polkadot/node/core/approval-voting/src/approval_db/v3/tests.rs index 372dd49803cb..69278868fa3d 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/v3/tests.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/v3/tests.rs @@ -264,8 +264,8 @@ fn add_block_entry_adds_child() { fn canonicalize_works() { let (mut db, store) = make_db(); - // -> B1 -> C1 -> D1 - // A -> B2 -> C2 -> D2 + // -> B1 -> C1 -> D1 -> E1 + // A -> B2 -> C2 -> D2 -> E2 // // We'll canonicalize C1. Everything except D1 should disappear. // @@ -293,18 +293,22 @@ fn canonicalize_works() { let block_hash_c2 = Hash::repeat_byte(5); let block_hash_d1 = Hash::repeat_byte(6); let block_hash_d2 = Hash::repeat_byte(7); + let block_hash_e1 = Hash::repeat_byte(8); + let block_hash_e2 = Hash::repeat_byte(9); let candidate_receipt_genesis = make_candidate(ParaId::from(1_u32), genesis); let candidate_receipt_a = make_candidate(ParaId::from(2_u32), block_hash_a); let candidate_receipt_b = make_candidate(ParaId::from(3_u32), block_hash_a); let candidate_receipt_b1 = make_candidate(ParaId::from(4_u32), block_hash_b1); let candidate_receipt_c1 = make_candidate(ParaId::from(5_u32), block_hash_c1); + let candidate_receipt_e1 = make_candidate(ParaId::from(6_u32), block_hash_e1); let cand_hash_1 = candidate_receipt_genesis.hash(); let cand_hash_2 = candidate_receipt_a.hash(); let cand_hash_3 = candidate_receipt_b.hash(); let cand_hash_4 = candidate_receipt_b1.hash(); let cand_hash_5 = candidate_receipt_c1.hash(); + let cand_hash_6 = candidate_receipt_e1.hash(); let block_entry_a = make_block_entry(block_hash_a, genesis, 1, Vec::new()); let block_entry_b1 = make_block_entry(block_hash_b1, block_hash_a, 2, Vec::new()); @@ -326,6 +330,12 @@ fn canonicalize_works() { let block_entry_d2 = make_block_entry(block_hash_d2, block_hash_c2, 4, vec![(CoreIndex(0), cand_hash_5)]); + let block_entry_e1 = + make_block_entry(block_hash_e1, block_hash_d1, 5, vec![(CoreIndex(0), cand_hash_6)]); + + let block_entry_e2 = + make_block_entry(block_hash_e2, block_hash_d2, 5, vec![(CoreIndex(0), cand_hash_6)]); + let candidate_info = { let mut candidate_info = HashMap::new(); candidate_info.insert( @@ -345,6 +355,8 @@ fn canonicalize_works() { candidate_info .insert(cand_hash_5, NewCandidateInfo::new(candidate_receipt_c1, GroupIndex(5), None)); + candidate_info + .insert(cand_hash_6, 
NewCandidateInfo::new(candidate_receipt_e1, GroupIndex(6), None)); candidate_info }; @@ -357,6 +369,8 @@ fn canonicalize_works() { block_entry_c2.clone(), block_entry_d1.clone(), block_entry_d2.clone(), + block_entry_e1.clone(), + block_entry_e2.clone(), ]; let mut overlay_db = OverlayedBackend::new(&db); @@ -438,7 +452,7 @@ fn canonicalize_works() { assert_eq!( load_stored_blocks(store.as_ref(), &TEST_CONFIG).unwrap().unwrap(), - StoredBlockRange(4, 5) + StoredBlockRange(4, 6) ); check_candidates_in_store(vec![ @@ -447,6 +461,7 @@ fn canonicalize_works() { (cand_hash_3, Some(vec![block_hash_d1])), (cand_hash_4, Some(vec![block_hash_d1])), (cand_hash_5, None), + (cand_hash_6, Some(vec![block_hash_e1])), ]); check_blocks_in_store(vec![ @@ -456,6 +471,37 @@ fn canonicalize_works() { (block_hash_c1, None), (block_hash_c2, None), (block_hash_d1, Some(vec![cand_hash_3, cand_hash_4])), + (block_hash_e1, Some(vec![cand_hash_6])), + (block_hash_d2, None), + ]); + + let mut overlay_db = OverlayedBackend::new(&db); + canonicalize(&mut overlay_db, 4, block_hash_d1).unwrap(); + let write_ops = overlay_db.into_write_ops(); + db.write(write_ops).unwrap(); + + assert_eq!( + load_stored_blocks(store.as_ref(), &TEST_CONFIG).unwrap().unwrap(), + StoredBlockRange(5, 6) + ); + + check_candidates_in_store(vec![ + (cand_hash_1, None), + (cand_hash_2, None), + (cand_hash_3, None), + (cand_hash_4, None), + (cand_hash_5, None), + (cand_hash_6, Some(vec![block_hash_e1])), + ]); + + check_blocks_in_store(vec![ + (block_hash_a, None), + (block_hash_b1, None), + (block_hash_b2, None), + (block_hash_c1, None), + (block_hash_c2, None), + (block_hash_d1, None), + (block_hash_e1, Some(vec![cand_hash_6])), (block_hash_d2, None), ]); } diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs index 2176cc7675be..7cea22d1a6a7 100644 --- a/polkadot/node/core/approval-voting/src/lib.rs +++ b/polkadot/node/core/approval-voting/src/lib.rs @@ -1582,8 +1582,9 @@ async fn handle_actions< session_info_provider, ) .await?; - - approval_voting_sender.send_messages(messages.into_iter()).await; + for message in messages.into_iter() { + approval_voting_sender.send_unbounded_message(message); + } let next_actions: Vec = next_actions.into_iter().map(|v| v.clone()).chain(actions_iter).collect(); @@ -1668,6 +1669,7 @@ async fn distribution_messages_for_activation SubsystemResult<()> { let range = match overlay_db.load_stored_blocks()? { None => return Ok(()), - Some(range) if range.0 >= canon_number => return Ok(()), + Some(range) if range.0 > canon_number => return Ok(()), Some(range) => range, }; diff --git a/polkadot/node/core/approval-voting/src/tests.rs b/polkadot/node/core/approval-voting/src/tests.rs index 099ab419dfbf..be569a1de3ec 100644 --- a/polkadot/node/core/approval-voting/src/tests.rs +++ b/polkadot/node/core/approval-voting/src/tests.rs @@ -4459,6 +4459,114 @@ async fn setup_overseer_with_two_blocks_each_with_one_assignment_triggered( assert!(our_assignment.triggered()); } +// Builds a chain with a fork where both relay blocks include the same candidate. 
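The guard change in canonicalize above (">=" to ">") fixes an off-by-one: judging by the test values, StoredBlockRange(start, end) is the half-open range of block heights still stored, and with the old guard a canonicalize call at exactly canon_number == start returned early, so that height was never pruned. The extended test (blocks E1/E2 plus the second canonicalize pass at height 4) exercises exactly this boundary. A worked sketch of the guard, with the types reduced to what the check uses:

// Sketch: half-open range of stored block heights, per the test values above.
struct StoredBlockRange(u32, u32);

// With the fixed guard, canonicalizing at `canon_number == start` proceeds.
fn nothing_to_canonicalize(range: &StoredBlockRange, canon_number: u32) -> bool {
    // Old guard `range.0 >= canon_number` wrongly skipped the equal case.
    range.0 > canon_number
}

fn main() {
    let range = StoredBlockRange(4, 6); // heights 4 and 5 still stored
    assert!(!nothing_to_canonicalize(&range, 4)); // height 4 must still be pruned
    assert!(nothing_to_canonicalize(&range, 3)); // below 4 is already canonicalized
}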
+async fn build_chain_with_block_with_two_candidates( + block_hash1: Hash, + slot: Slot, + sync_oracle_handle: TestSyncOracleHandle, + candidate_receipt: Vec, +) -> (ChainBuilder, SessionInfo) { + let validators = vec![ + Sr25519Keyring::Alice, + Sr25519Keyring::Bob, + Sr25519Keyring::Charlie, + Sr25519Keyring::Dave, + Sr25519Keyring::Eve, + ]; + let session_info = SessionInfo { + validator_groups: IndexedVec::>::from(vec![ + vec![ValidatorIndex(0), ValidatorIndex(1)], + vec![ValidatorIndex(2)], + vec![ValidatorIndex(3), ValidatorIndex(4)], + ]), + ..session_info(&validators) + }; + + let candidates = Some( + candidate_receipt + .iter() + .enumerate() + .map(|(i, receipt)| (receipt.clone(), CoreIndex(i as u32), GroupIndex(i as u32))) + .collect(), + ); + let mut chain_builder = ChainBuilder::new(); + + chain_builder + .major_syncing(sync_oracle_handle.is_major_syncing.clone()) + .add_block( + block_hash1, + ChainBuilder::GENESIS_HASH, + 1, + BlockConfig { + slot, + candidates: candidates.clone(), + session_info: Some(session_info.clone()), + end_syncing: true, + }, + ); + (chain_builder, session_info) +} + +async fn setup_overseer_with_blocks_with_two_assignments_triggered( + virtual_overseer: &mut VirtualOverseer, + store: TestStore, + clock: &Arc, + sync_oracle_handle: TestSyncOracleHandle, +) { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber(rx)) => { + rx.send(Ok(0)).unwrap(); + } + ); + + let block_hash = Hash::repeat_byte(0x01); + let candidate_commitments = CandidateCommitments::default(); + let mut candidate_receipt = dummy_candidate_receipt_v2(block_hash); + candidate_receipt.commitments_hash = candidate_commitments.hash(); + let candidate_hash = candidate_receipt.hash(); + + let mut candidate_commitments2 = CandidateCommitments::default(); + candidate_commitments2.processed_downward_messages = 3; + let mut candidate_receipt2 = dummy_candidate_receipt_v2(block_hash); + candidate_receipt2.commitments_hash = candidate_commitments2.hash(); + let candidate_hash2 = candidate_receipt2.hash(); + + let slot = Slot::from(1); + let (chain_builder, _session_info) = build_chain_with_block_with_two_candidates( + block_hash, + slot, + sync_oracle_handle, + vec![candidate_receipt, candidate_receipt2], + ) + .await; + chain_builder.build(virtual_overseer).await; + + assert!(!clock.inner.lock().current_wakeup_is(1)); + clock.inner.lock().wakeup_all(1); + + assert!(clock.inner.lock().current_wakeup_is(slot_to_tick(slot))); + clock.inner.lock().wakeup_all(slot_to_tick(slot)); + + futures_timer::Delay::new(Duration::from_millis(200)).await; + + clock.inner.lock().wakeup_all(slot_to_tick(slot + 2)); + + assert_eq!(clock.inner.lock().wakeups.len(), 0); + + futures_timer::Delay::new(Duration::from_millis(200)).await; + + let candidate_entry = store.load_candidate_entry(&candidate_hash).unwrap().unwrap(); + let our_assignment = + candidate_entry.approval_entry(&block_hash).unwrap().our_assignment().unwrap(); + assert!(our_assignment.triggered()); + + let candidate_entry = store.load_candidate_entry(&candidate_hash2).unwrap().unwrap(); + let our_assignment = + candidate_entry.approval_entry(&block_hash).unwrap().our_assignment().unwrap(); + assert!(our_assignment.triggered()); +} + // Tests that for candidates that we did not approve yet, for which we triggered the assignment and // the approval work we restart the work to approve it. 
#[test] @@ -4920,6 +5028,212 @@ fn subsystem_sends_pending_approvals_on_approval_restart() { }); } +// Test that after restart approvals are sent after all assignments have been distributed. +#[test] +fn subsystem_sends_assignment_approval_in_correct_order_on_approval_restart() { + let assignment_criteria = Box::new(MockAssignmentCriteria( + || { + let mut assignments = HashMap::new(); + + let _ = assignments.insert( + CoreIndex(0), + approval_db::v2::OurAssignment { + cert: garbage_assignment_cert_v2(AssignmentCertKindV2::RelayVRFModuloCompact { + core_bitfield: vec![CoreIndex(0), CoreIndex(2)].try_into().unwrap(), + }), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); + + let _ = assignments.insert( + CoreIndex(1), + approval_db::v2::OurAssignment { + cert: garbage_assignment_cert_v2(AssignmentCertKindV2::RelayVRFDelay { + core_index: CoreIndex(1), + }), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); + assignments + }, + |_| Ok(0), + )); + let config = HarnessConfigBuilder::default().assignment_criteria(assignment_criteria).build(); + let store = config.backend(); + let store_clone = config.backend(); + + test_harness(config, |test_harness| async move { + let TestHarness { mut virtual_overseer, clock, sync_oracle_handle } = test_harness; + + setup_overseer_with_blocks_with_two_assignments_triggered( + &mut virtual_overseer, + store, + &clock, + sync_oracle_handle, + ) + .await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + _, + )) => { + } + ); + + recover_available_data(&mut virtual_overseer).await; + fetch_validation_code(&mut virtual_overseer).await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + _ + )) => { + } + ); + + recover_available_data(&mut virtual_overseer).await; + fetch_validation_code(&mut virtual_overseer).await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive { + exec_kind, + response_sender, + .. + }) if exec_kind == PvfExecKind::Approval => { + response_sender.send(Ok(ValidationResult::Valid(Default::default(), Default::default()))) + .unwrap(); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive { + exec_kind, + response_sender, + .. + }) if exec_kind == PvfExecKind::Approval => { + response_sender.send(Ok(ValidationResult::Valid(Default::default(), Default::default()))) + .unwrap(); + } + ); + + // Configure a big coalesce number, so that the signature is cached instead of being sent to + // approval-distribution. 
+ assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: 2, + })); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: 2, + })); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(_)) + ); + + // Assert that there are no more messages being sent by the subsystem + assert!(overseer_recv(&mut virtual_overseer).timeout(TIMEOUT / 2).await.is_none()); + + virtual_overseer + }); + + let config = HarnessConfigBuilder::default().backend(store_clone).major_syncing(true).build(); + // On restart we should first distribute all assignments covering a coalesced approval. + test_harness(config, |test_harness| async move { + let TestHarness { mut virtual_overseer, clock, sync_oracle_handle } = test_harness; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber(rx)) => { + rx.send(Ok(0)).unwrap(); + } + ); + + let block_hash = Hash::repeat_byte(0x01); + let candidate_commitments = CandidateCommitments::default(); + let mut candidate_receipt = dummy_candidate_receipt_v2(block_hash); + candidate_receipt.commitments_hash = candidate_commitments.hash(); + + let mut candidate_commitments2 = CandidateCommitments::default(); + candidate_commitments2.processed_downward_messages = 3; + let mut candidate_receipt2 = dummy_candidate_receipt_v2(block_hash); + candidate_receipt2.commitments_hash = candidate_commitments2.hash(); + + let slot = Slot::from(1); + + clock.inner.lock().set_tick(slot_to_tick(slot + 2)); + let (chain_builder, _session_info) = build_chain_with_block_with_two_candidates( + block_hash, + slot, + sync_oracle_handle, + vec![candidate_receipt.into(), candidate_receipt2.into()], + ) + .await; + chain_builder.build(&mut virtual_overseer).await; + + futures_timer::Delay::new(Duration::from_millis(2000)).await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::NewBlocks( + _, + )) => { + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + _, + )) => { + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + _, + )) => { + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(approval)) => { + assert_eq!(approval.candidate_indices.count_ones(), 2); + } + ); + + // Assert that there are no more messages being sent by the subsystem + assert!(overseer_recv(&mut virtual_overseer).timeout(TIMEOUT / 2).await.is_none()); + + virtual_overseer + }); +} + // Test we correctly update the timer when we mark the beginning of gathering assignments. 
#[test] fn test_gathering_assignments_statements() { diff --git a/polkadot/node/core/av-store/Cargo.toml b/polkadot/node/core/av-store/Cargo.toml index 1d14e4cfba37..9f6864269cef 100644 --- a/polkadot/node/core/av-store/Cargo.toml +++ b/polkadot/node/core/av-store/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/backing/Cargo.toml b/polkadot/node/core/backing/Cargo.toml index cd1acf9daa93..a81fe9486c63 100644 --- a/polkadot/node/core/backing/Cargo.toml +++ b/polkadot/node/core/backing/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "The Candidate Backing Subsystem. Tracks parachain candidates that can be backed, as well as the issuance of statements about candidates." +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/bitfield-signing/Cargo.toml b/polkadot/node/core/bitfield-signing/Cargo.toml index 126a18a14166..f00ba5712661 100644 --- a/polkadot/node/core/bitfield-signing/Cargo.toml +++ b/polkadot/node/core/bitfield-signing/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Bitfield signing subsystem for the Polkadot node" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/candidate-validation/Cargo.toml b/polkadot/node/core/candidate-validation/Cargo.toml index 87855dbce415..fea16b1c7604 100644 --- a/polkadot/node/core/candidate-validation/Cargo.toml +++ b/polkadot/node/core/candidate-validation/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/chain-api/Cargo.toml b/polkadot/node/core/chain-api/Cargo.toml index a8e911e0c5c9..0f443868dada 100644 --- a/polkadot/node/core/chain-api/Cargo.toml +++ b/polkadot/node/core/chain-api/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "The Chain API subsystem provides access to chain related utility functions like block number to hash conversions." 
+homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/chain-selection/Cargo.toml b/polkadot/node/core/chain-selection/Cargo.toml index 755d5cadeaaf..d2cc425a4816 100644 --- a/polkadot/node/core/chain-selection/Cargo.toml +++ b/polkadot/node/core/chain-selection/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/dispute-coordinator/Cargo.toml b/polkadot/node/core/dispute-coordinator/Cargo.toml index 344b66af1933..11b4ac645c23 100644 --- a/polkadot/node/core/dispute-coordinator/Cargo.toml +++ b/polkadot/node/core/dispute-coordinator/Cargo.toml @@ -5,6 +5,8 @@ description = "The node-side components that participate in disputes" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/parachains-inherent/Cargo.toml b/polkadot/node/core/parachains-inherent/Cargo.toml index 1e4953f40d0b..b1cd5e971b00 100644 --- a/polkadot/node/core/parachains-inherent/Cargo.toml +++ b/polkadot/node/core/parachains-inherent/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Parachains inherent data provider for Polkadot node" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/prospective-parachains/Cargo.toml b/polkadot/node/core/prospective-parachains/Cargo.toml index 5629e4ef7fbe..ced6c30c64b6 100644 --- a/polkadot/node/core/prospective-parachains/Cargo.toml +++ b/polkadot/node/core/prospective-parachains/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "The Prospective Parachains subsystem. Tracks and handles prospective parachain fragments." 
+homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/provisioner/Cargo.toml b/polkadot/node/core/provisioner/Cargo.toml index 64a598b420f7..26dca1adbc79 100644 --- a/polkadot/node/core/provisioner/Cargo.toml +++ b/polkadot/node/core/provisioner/Cargo.toml @@ -5,6 +5,8 @@ description = "Responsible for assembling a relay chain block from a set of avai authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/pvf-checker/Cargo.toml b/polkadot/node/core/pvf-checker/Cargo.toml index 73ef17a2843a..cb7e3eadcf0a 100644 --- a/polkadot/node/core/pvf-checker/Cargo.toml +++ b/polkadot/node/core/pvf-checker/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/pvf/Cargo.toml b/polkadot/node/core/pvf/Cargo.toml index a9f97c308f26..1b2a16ae8b55 100644 --- a/polkadot/node/core/pvf/Cargo.toml +++ b/polkadot/node/core/pvf/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -38,6 +40,7 @@ polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } +sc-tracing = { workspace = true } sp-core = { workspace = true, default-features = true } sp-maybe-compressed-blob = { optional = true, workspace = true, default-features = true } polkadot-node-core-pvf-prepare-worker = { optional = true, workspace = true, default-features = true } diff --git a/polkadot/node/core/pvf/common/Cargo.toml b/polkadot/node/core/pvf/common/Cargo.toml index 903c8dd1af29..d058d582fc26 100644 --- a/polkadot/node/core/pvf/common/Cargo.toml +++ b/polkadot/node/core/pvf/common/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/pvf/execute-worker/Cargo.toml b/polkadot/node/core/pvf/execute-worker/Cargo.toml index 6ad340d25612..8327cf8058cd 100644 --- a/polkadot/node/core/pvf/execute-worker/Cargo.toml +++ b/polkadot/node/core/pvf/execute-worker/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/pvf/prepare-worker/Cargo.toml b/polkadot/node/core/pvf/prepare-worker/Cargo.toml index 56235bd82192..9dc800a8ef56 100644 --- a/polkadot/node/core/pvf/prepare-worker/Cargo.toml +++ b/polkadot/node/core/pvf/prepare-worker/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/pvf/src/worker_interface.rs b/polkadot/node/core/pvf/src/worker_interface.rs index e63778d4692f..f279fbb53544 100644 --- a/polkadot/node/core/pvf/src/worker_interface.rs +++ b/polkadot/node/core/pvf/src/worker_interface.rs @@ -237,10 +237,8 @@ impl WorkerHandle { // Clear all env vars from 
the spawned process. let mut command = process::Command::new(program.as_ref()); command.env_clear(); - // Add back any env vars we want to keep. - if let Ok(value) = std::env::var("RUST_LOG") { - command.env("RUST_LOG", value); - } + + command.env("RUST_LOG", sc_tracing::logging::get_directives().join(",")); let mut child = command .args(extra_args) diff --git a/polkadot/node/core/runtime-api/Cargo.toml b/polkadot/node/core/runtime-api/Cargo.toml index 834e4b300b9e..15cbf4665d06 100644 --- a/polkadot/node/core/runtime-api/Cargo.toml +++ b/polkadot/node/core/runtime-api/Cargo.toml @@ -5,6 +5,8 @@ description = "Wrapper around the parachain-related runtime APIs" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/gum/Cargo.toml b/polkadot/node/gum/Cargo.toml index 9b2df435a06a..84875ea121b6 100644 --- a/polkadot/node/gum/Cargo.toml +++ b/polkadot/node/gum/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Stick logs together with the TraceID as provided by tempo" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/gum/proc-macro/Cargo.toml b/polkadot/node/gum/proc-macro/Cargo.toml index da6364977cae..b4a3401b15e4 100644 --- a/polkadot/node/gum/proc-macro/Cargo.toml +++ b/polkadot/node/gum/proc-macro/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Generate an overseer including builder pattern and message wrapper from a single annotated struct definition." +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/metrics/Cargo.toml b/polkadot/node/metrics/Cargo.toml index 41b08b66e9b4..05344993a75e 100644 --- a/polkadot/node/metrics/Cargo.toml +++ b/polkadot/node/metrics/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/metrics/src/tests.rs b/polkadot/node/metrics/src/tests.rs index 4760138058eb..43dce0ec2ffe 100644 --- a/polkadot/node/metrics/src/tests.rs +++ b/polkadot/node/metrics/src/tests.rs @@ -21,7 +21,7 @@ use hyper::Uri; use hyper_util::{client::legacy::Client, rt::TokioExecutor}; use polkadot_primitives::metric_definitions::PARACHAIN_INHERENT_DATA_BITFIELDS_PROCESSED; use polkadot_test_service::{node_config, run_validator_node, test_prometheus_config}; -use sp_keyring::AccountKeyring::*; +use sp_keyring::Sr25519Keyring::*; use std::collections::HashMap; const DEFAULT_PROMETHEUS_PORT: u16 = 9616; diff --git a/polkadot/node/network/approval-distribution/Cargo.toml b/polkadot/node/network/approval-distribution/Cargo.toml index 8d674a733470..abf345552f89 100644 --- a/polkadot/node/network/approval-distribution/Cargo.toml +++ b/polkadot/node/network/approval-distribution/Cargo.toml @@ -5,6 +5,8 @@ description = "Polkadot Approval Distribution subsystem for the distribution of authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/network/approval-distribution/src/lib.rs b/polkadot/node/network/approval-distribution/src/lib.rs index 876cc59b9c28..cefb1d744992 100644 --- 
a/polkadot/node/network/approval-distribution/src/lib.rs +++ b/polkadot/node/network/approval-distribution/src/lib.rs @@ -163,8 +163,6 @@ enum ApprovalEntryError { InvalidCandidateIndex, DuplicateApproval, UnknownAssignment, - #[allow(dead_code)] - AssignmentsFollowedDifferentPaths(RequiredRouting, RequiredRouting), } impl ApprovalEntry { @@ -318,7 +316,7 @@ impl Default for AggressionConfig { fn default() -> Self { AggressionConfig { l1_threshold: Some(16), - l2_threshold: Some(28), + l2_threshold: Some(64), resend_unfinalized_period: Some(8), } } @@ -514,6 +512,8 @@ struct BlockEntry { vrf_story: RelayVRFStory, /// The block slot. slot: Slot, + /// Backing off from re-sending messages to peers. + last_resent_at_block_number: Option<BlockNumber>, } impl BlockEntry { @@ -568,7 +568,7 @@ impl BlockEntry { &mut self, approval: IndirectSignedApprovalVoteV2, ) -> Result<(RequiredRouting, HashSet<PeerId>), ApprovalEntryError> { - let mut required_routing = None; + let mut required_routing: Option<RequiredRouting> = None; let mut peers_randomly_routed_to = HashSet::new(); if self.candidates.len() < approval.candidate_indices.len() as usize { @@ -595,16 +595,11 @@ impl BlockEntry { peers_randomly_routed_to .extend(approval_entry.routing_info().peers_randomly_routed.iter()); - if let Some(required_routing) = required_routing { - if required_routing != approval_entry.routing_info().required_routing { - // This shouldn't happen since the required routing is computed based on the - // validator_index, so two assignments from the same validators will have - // the same required routing. - return Err(ApprovalEntryError::AssignmentsFollowedDifferentPaths( - required_routing, - approval_entry.routing_info().required_routing, - )) - } + if let Some(current_required_routing) = required_routing { + required_routing = Some( + current_required_routing + .combine(approval_entry.routing_info().required_routing), + ); } else { required_routing = Some(approval_entry.routing_info().required_routing) } @@ -885,6 +880,7 @@ impl State { candidates_metadata: meta.candidates, vrf_story: meta.vrf_story, slot: meta.slot, + last_resent_at_block_number: None, }); self.topologies.inc_session_refs(meta.session); @@ -1324,6 +1320,33 @@ impl State { self.enable_aggression(network_sender, Resend::No, metrics).await; } + // When finality is lagging, as a last resort, nodes start sending the messages they have + // multiple times. This means it is safe to accept duplicate messages without punishing the + // peer: reducing its reputation could end up banning the peer, which in turn would create + // more no-shows. + fn accept_duplicates_from_validators( + blocks_by_number: &BTreeMap<BlockNumber, Vec<Hash>>, + topologies: &SessionGridTopologies, + aggression_config: &AggressionConfig, + entry: &BlockEntry, + peer: PeerId, + ) -> bool { + let topology = topologies.get_topology(entry.session); + let min_age = blocks_by_number.iter().next().map(|(num, _)| num); + let max_age = blocks_by_number.iter().rev().next().map(|(num, _)| num); + + // Return if we don't have at least 1 block.
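+ // (Despite their names, `min_age` and `max_age` hold the block numbers of the oldest and + // newest blocks known to the subsystem; their difference below is the age that is compared + // against the aggression thresholds.)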
+ let (min_age, max_age) = match (min_age, max_age) { + (Some(min), Some(max)) => (*min, *max), + _ => return false, + }; + + let age = max_age.saturating_sub(min_age); + + aggression_config.should_trigger_aggression(age) && + topology.map(|topology| topology.is_validator(&peer)).unwrap_or(false) + } + async fn import_and_circulate_assignment( &mut self, approval_voting_sender: &mut A, @@ -1388,20 +1411,29 @@ if peer_knowledge.contains(&message_subject, message_kind) { // wasn't included before if !peer_knowledge.received.insert(message_subject.clone(), message_kind) { - gum::debug!( - target: LOG_TARGET, - ?peer_id, - ?message_subject, - "Duplicate assignment", - ); - - modify_reputation( - &mut self.reputation, - network_sender, + if !Self::accept_duplicates_from_validators( + &self.blocks_by_number, + &self.topologies, + &self.aggression_config, + entry, peer_id, - COST_DUPLICATE_MESSAGE, - ) - .await; + ) { + gum::debug!( + target: LOG_TARGET, + ?peer_id, + ?message_subject, + "Duplicate assignment", + ); + + modify_reputation( + &mut self.reputation, + network_sender, + peer_id, + COST_DUPLICATE_MESSAGE, + ) + .await; + } + metrics.on_assignment_duplicate(); } else { gum::trace!( @@ -1717,6 +1749,9 @@ assignments_knowledge_key: &Vec<(MessageSubject, MessageKind)>, approval_knowledge_key: &(MessageSubject, MessageKind), entry: &mut BlockEntry, + blocks_by_number: &BTreeMap<BlockNumber, Vec<Hash>>, + topologies: &SessionGridTopologies, + aggression_config: &AggressionConfig, reputation: &mut ReputationAggregator, peer_id: PeerId, metrics: &Metrics, @@ -1745,20 +1780,27 @@ .received .insert(approval_knowledge_key.0.clone(), approval_knowledge_key.1) { - gum::trace!( - target: LOG_TARGET, - ?peer_id, - ?approval_knowledge_key, - "Duplicate approval", - ); - - modify_reputation( - reputation, - network_sender, + if !Self::accept_duplicates_from_validators( + blocks_by_number, + topologies, + aggression_config, + entry, peer_id, - COST_DUPLICATE_MESSAGE, - ) - .await; + ) { + gum::trace!( + target: LOG_TARGET, + ?peer_id, + ?approval_knowledge_key, + "Duplicate approval", + ); + modify_reputation( + reputation, + network_sender, + peer_id, + COST_DUPLICATE_MESSAGE, + ) + .await; + } metrics.on_approval_duplicate(); } return false @@ -1850,6 +1892,9 @@ &assignments_knowledge_keys, &approval_knwowledge_key, entry, + &self.blocks_by_number, + &self.topologies, + &self.aggression_config, &mut self.reputation, peer_id, metrics, @@ -2260,18 +2305,43 @@ &self.topologies, |block_entry| { let block_age = max_age - block_entry.number; + // We want to resend only for blocks at min_age; there is no point in + // resending for blocks newer than that, because we would just create load + // and not gain anything. + let diff_from_min_age = block_entry.number - min_age; + + // We want to back off on resending for blocks that have been resent recently, to + // give nodes time to process all the extra messages. If we still have not + // finalized, we are going to resend again after unfinalized_period * 2 blocks + // since the last resend.
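+ // As an illustrative example with the default `resend_unfinalized_period` of 8: the oldest + // unfinalized block is first resent once it is 8 blocks old, and the earliest possible + // resend after that happens once at least 16 (8 * 2) further blocks were imported without + // finality, at the next age that is a multiple of 8.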
+ let blocks_since_last_sent = block_entry + .last_resent_at_block_number + .map(|last_resent_at_block_number| max_age - last_resent_at_block_number); + + let can_resend_at_this_age = blocks_since_last_sent + .zip(config.resend_unfinalized_period) + .map(|(blocks_since_last_sent, unfinalized_period)| { + blocks_since_last_sent >= unfinalized_period * 2 + }) + .unwrap_or(true); if resend == Resend::Yes && - config - .resend_unfinalized_period - .as_ref() - .map_or(false, |p| block_age > 0 && block_age % p == 0) - { + config.resend_unfinalized_period.as_ref().map_or(false, |p| { + block_age > 0 && + block_age % p == 0 && diff_from_min_age == 0 && + can_resend_at_this_age + }) { // Retry sending to all peers. for (_, knowledge) in block_entry.known_by.iter_mut() { knowledge.sent = Knowledge::default(); } - + block_entry.last_resent_at_block_number = Some(max_age); + gum::debug!( + target: LOG_TARGET, + block_number = ?block_entry.number, + ?max_age, + "Aggression enabled with resend for block", + ); true } else { false diff --git a/polkadot/node/network/approval-distribution/src/tests.rs b/polkadot/node/network/approval-distribution/src/tests.rs index 063e71f2f528..323b2cb08fec 100644 --- a/polkadot/node/network/approval-distribution/src/tests.rs +++ b/polkadot/node/network/approval-distribution/src/tests.rs @@ -1030,6 +1030,141 @@ fn peer_sending_us_the_same_we_just_sent_them_is_ok() { ); } +#[test] +fn peer_sending_us_duplicates_while_aggression_enabled_is_ok() { + let parent_hash = Hash::repeat_byte(0xFF); + let hash = Hash::repeat_byte(0xAA); + + let peers = make_peers_and_authority_ids(8); + let peer_a = peers.first().unwrap().0; + + let _ = test_harness( + Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), + Arc::new(SystemClock {}), + state_without_reputation_delay(), + |mut virtual_overseer| async move { + let overseer = &mut virtual_overseer; + let peer = &peer_a; + setup_peer_with_view(overseer, peer, view![], ValidationVersion::V3).await; + + let peers_with_optional_peer_id = peers + .iter() + .map(|(peer_id, authority)| (Some(*peer_id), authority.clone())) + .collect_vec(); + // Setup a topology where peer_a is neighbor to current node. 
+ setup_gossip_topology( + overseer, + make_gossip_topology(1, &peers_with_optional_peer_id, &[0], &[2], 1), + ) + .await; + + // new block `hash` with 1 candidate + let meta = BlockApprovalMeta { + hash, + parent_hash, + number: 1, + candidates: vec![Default::default(); 1], + slot: 1.into(), + session: 1, + vrf_story: RelayVRFStory(Default::default()), + }; + let msg = ApprovalDistributionMessage::NewBlocks(vec![meta]); + overseer_send(overseer, msg).await; + + // import an assignment related to `hash` locally + let validator_index = ValidatorIndex(0); + let candidate_indices: CandidateBitfield = + vec![0 as CandidateIndex].try_into().unwrap(); + let candidate_bitfields = vec![CoreIndex(0)].try_into().unwrap(); + let cert = fake_assignment_cert_v2(hash, validator_index, candidate_bitfields); + overseer_send( + overseer, + ApprovalDistributionMessage::DistributeAssignment( + cert.clone().into(), + candidate_indices.clone(), + ), + ) + .await; + + // update peer view to include the hash + overseer_send( + overseer, + ApprovalDistributionMessage::NetworkBridgeUpdate( + NetworkBridgeEvent::PeerViewChange(*peer, view![hash]), + ), + ) + .await; + + // we should send them the assignment + assert_matches!( + overseer_recv(overseer).await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::V3(protocol_v3::ValidationProtocol::ApprovalDistribution( + protocol_v3::ApprovalDistributionMessage::Assignments(assignments) + )) + )) => { + assert_eq!(peers.len(), 1); + assert_eq!(assignments.len(), 1); + } + ); + + // but if someone else is sending the peer the same assignment, + // the peer could send it to us as well + let assignments = vec![(cert, candidate_indices)]; + let msg = protocol_v3::ApprovalDistributionMessage::Assignments(assignments); + send_message_from_peer_v3(overseer, peer, msg.clone()).await; + + assert!( + overseer.recv().timeout(TIMEOUT).await.is_none(), + "we should not punish the peer" + ); + + // send the assignments again + send_message_from_peer_v3(overseer, peer, msg.clone()).await; + + // now we should punish the peer for the duplicate + expect_reputation_change(overseer, peer, COST_DUPLICATE_MESSAGE).await; + + // Peers will be continuously punished for sending duplicates until approval-distribution + // aggression kicks in, at which point they aren't punished anymore. + let mut parent_hash = hash; + for level in 0..16 { + // As long as the lag is below the l1 aggression threshold, punish peers for duplicates. + send_message_from_peer_v3(overseer, peer, msg.clone()).await; + expect_reputation_change(overseer, peer, COST_DUPLICATE_MESSAGE).await; + + let number = 1 + level + 1; // first block had number 1 + let hash = BlakeTwo256::hash_of(&(parent_hash, number)); + let meta = BlockApprovalMeta { + hash, + parent_hash, + number, + candidates: vec![], + slot: (level as u64).into(), + session: 1, + vrf_story: RelayVRFStory(Default::default()), + }; + + let msg = ApprovalDistributionMessage::ApprovalCheckingLagUpdate(level + 1); + overseer_send(overseer, msg).await; + + let msg = ApprovalDistributionMessage::NewBlocks(vec![meta]); + overseer_send(overseer, msg).await; + + parent_hash = hash; + } + + // send the assignments again, we should not punish the peer because aggression is + // enabled.
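+ // (By this point the approval-checking lag and the number of unfinalized blocks have both + // reached the default l1 aggression threshold of 16, so duplicates coming from validators + // in the topology are tolerated from now on.)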
+ send_message_from_peer_v3(overseer, peer, msg).await; + + assert!(overseer.recv().timeout(TIMEOUT).await.is_none(), "no message should be sent"); + virtual_overseer + }, + ); +} + #[test] fn import_approval_happy_path_v1_v2_peers() { let peers = make_peers_and_authority_ids(15); @@ -3892,7 +4027,7 @@ fn resends_messages_periodically() { // Add blocks until resend is done. { let mut parent_hash = hash; - for level in 0..2 { + for level in 0..4 { number = number + 1; let hash = BlakeTwo256::hash_of(&(parent_hash, number)); let meta = BlockApprovalMeta { diff --git a/polkadot/node/network/availability-distribution/Cargo.toml b/polkadot/node/network/availability-distribution/Cargo.toml index 8c5574f244e4..e87103d99f72 100644 --- a/polkadot/node/network/availability-distribution/Cargo.toml +++ b/polkadot/node/network/availability-distribution/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/network/availability-recovery/Cargo.toml b/polkadot/node/network/availability-recovery/Cargo.toml index 41f09b1f7044..be4323e74f02 100644 --- a/polkadot/node/network/availability-recovery/Cargo.toml +++ b/polkadot/node/network/availability-recovery/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/network/bitfield-distribution/Cargo.toml b/polkadot/node/network/bitfield-distribution/Cargo.toml index 6d007255c574..2ff30489b6c1 100644 --- a/polkadot/node/network/bitfield-distribution/Cargo.toml +++ b/polkadot/node/network/bitfield-distribution/Cargo.toml @@ -5,6 +5,8 @@ description = "Polkadot Bitfiled Distribution subsystem, which gossips signed av authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/network/bridge/Cargo.toml b/polkadot/node/network/bridge/Cargo.toml index b4b5743853cd..c4b46c1dc001 100644 --- a/polkadot/node/network/bridge/Cargo.toml +++ b/polkadot/node/network/bridge/Cargo.toml @@ -5,6 +5,8 @@ description = "The Network Bridge Subsystem — protocol multiplexer for Polkado authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/network/collator-protocol/Cargo.toml b/polkadot/node/network/collator-protocol/Cargo.toml index 304cb23bb6aa..a51d24c70807 100644 --- a/polkadot/node/network/collator-protocol/Cargo.toml +++ b/polkadot/node/network/collator-protocol/Cargo.toml @@ -5,6 +5,8 @@ description = "Polkadot Collator Protocol subsystem. Allows collators and valida authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs index 504b0d716043..d77480272cb4 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs @@ -899,7 +899,7 @@ async fn process_msg( ); } }, - msg @ (ReportCollator(..) | Invalid(..) | Seconded(..)) => { + msg @ (Invalid(..) 
| Seconded(..)) => { gum::warn!( target: LOG_TARGET, "{:?} message is not expected on the collator side of the protocol", diff --git a/polkadot/node/network/collator-protocol/src/error.rs b/polkadot/node/network/collator-protocol/src/error.rs index ae7f9a8c1fbc..97fd4076bb8f 100644 --- a/polkadot/node/network/collator-protocol/src/error.rs +++ b/polkadot/node/network/collator-protocol/src/error.rs @@ -70,6 +70,9 @@ pub enum Error { #[error("Response receiver for claim queue request cancelled")] CancelledClaimQueue(oneshot::Canceled), + + #[error("No state for the relay parent")] + RelayParentStateNotFound, } /// An error happened on the validator side of the protocol when attempting @@ -122,7 +125,7 @@ impl SecondingError { PersistedValidationDataMismatch | CandidateHashMismatch | RelayParentMismatch | - Duplicate | ParentHeadDataMismatch | + ParentHeadDataMismatch | InvalidCoreIndex(_, _) | InvalidSessionIndex(_, _) | InvalidReceiptVersion(_) diff --git a/polkadot/node/network/collator-protocol/src/validator_side/claim_queue_state.rs b/polkadot/node/network/collator-protocol/src/validator_side/claim_queue_state.rs new file mode 100644 index 000000000000..3a34cf52fec6 --- /dev/null +++ b/polkadot/node/network/collator-protocol/src/validator_side/claim_queue_state.rs @@ -0,0 +1,1055 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. + +//! `ClaimQueueState` tracks the state of the claim queue over a set of relay blocks. Refer to +//! [`ClaimQueueState`] for more details. + +use std::collections::VecDeque; + +use crate::LOG_TARGET; +use polkadot_primitives::{Hash, Id as ParaId}; + +/// Represents a single claim from the claim queue, mapped to the relay chain block where it could +/// be backed on-chain. +#[derive(Debug, PartialEq)] +struct ClaimInfo { + /// Hash of the relay chain block. Can be `None` if it is still not known (a future block). + hash: Option<Hash>, + /// Represents the `ParaId` scheduled for the block. Can be `None` if nothing is scheduled. + claim: Option<ParaId>, + /// The length of the claim queue at the block. It is used to determine the 'block window' + /// where a claim can be made. + claim_queue_len: usize, + /// A flag that indicates if the slot is claimed or not. + claimed: bool, +} + +/// Tracks the state of the claim queue over a set of relay blocks. +/// +/// Generally the claim queue represents the `ParaId` that should be scheduled at the current block +/// (the first element of the claim queue) and N other `ParaId`s which are supposed to be scheduled +/// on the next relay blocks. In other words the claim queue is a rolling window giving a hint about +/// what should be built/fetched/accepted (depending on the context) at each block. +/// +/// Since the claim queue peeks into the future blocks there is a relation between the claim queue +/// state at the current block and at the future blocks.
+/// Let's see an example with 2 co-scheduled parachains: +/// - relay parent 1; Claim queue: [A, B, A] +/// - relay parent 2; Claim queue: [B, A, B] +/// - relay parent 3; Claim queue: [A, B, A] +/// - and so on +/// +/// Note that at rp1 the second element in the claim queue is equal to the first one in rp2. Also +/// the third element of the claim queue at rp1 is equal to the second one in rp2 and the first one +/// in rp3. +/// +/// So if we want to claim the third slot at rp1 we are also claiming the second at rp2 and the +/// first at rp3. To track this in a simple way we can project the claim queue onto the relay +/// blocks like this: +/// [A] [B] [A] -> this is the claim queue at rp3 +/// [B] [A] [B] -> this is the claim queue at rp2 +/// [A] [B] [A] -> this is the claim queue at rp1 +/// [RP 1][RP 2][RP 3][RP X][RP Y] -> relay blocks, RP X and RP Y are future blocks +/// +/// Note that the claims at each column are the same so we can simplify this by just projecting a +/// single claim over a block: +/// [A] [B] [A] [B] [A] -> claims effectively are the same +/// [RP 1][RP 2][RP 3][RP X][RP Y] -> relay blocks, RP X and RP Y are future blocks +/// +/// Basically this is how `ClaimQueueState` works. It keeps track of claims at each block by mapping +/// claims to relay blocks. +/// +/// How does making a claim work? +/// At each relay block we keep track of how long the claim queue is. This is the 'window' where we +/// can make a claim. So adding a claim just looks for a free spot in this window and claims it. +/// +/// Note on adding a new leaf. +/// When a new leaf is added we check if the first element in its claim queue matches the +/// projection on the first element in 'future blocks'. If yes - the new relay block inherits this +/// claim. If not - this means that the claim queue changed for some reason so the claim can't be +/// inherited. This should not happen under normal circumstances. But if it happens, it means that +/// in the worst case we have got one claim which won't be satisfied. +pub(crate) struct ClaimQueueState { + block_state: VecDeque<ClaimInfo>, + future_blocks: VecDeque<ClaimInfo>, +} + +impl ClaimQueueState { + pub(crate) fn new() -> Self { + Self { block_state: VecDeque::new(), future_blocks: VecDeque::new() } + } + + // Appends a new leaf + pub(crate) fn add_leaf(&mut self, hash: &Hash, claim_queue: &Vec<ParaId>) { + if self.block_state.iter().any(|s| s.hash == Some(*hash)) { + return + } + + // First check if our view for the future blocks is consistent with the one in the claim + // queue of the new block. If not - the claim queue has changed for some reason and we need + // to readjust our view. + for (idx, expected_claim) in claim_queue.iter().enumerate() { + match self.future_blocks.get_mut(idx) { + Some(future_block) => + if future_block.claim.as_ref() != Some(expected_claim) { + // There is an inconsistency. Update our view with the one from the claim + // queue. `claimed` can't be true anymore since the `ParaId` has changed. + future_block.claimed = false; + future_block.claim = Some(*expected_claim); + }, + None => { + self.future_blocks.push_back(ClaimInfo { + hash: None, + claim: Some(*expected_claim), + // For future blocks we don't know the size of the claim queue. + // `claim_queue_len` could be an `Option` but there is not much benefit from + // the extra boilerplate code to handle it. We set it to one since we + // usually know about one claim at each future block, but this value is not + // used anywhere in the code.
+ claim_queue_len: 1, + claimed: false, + }); + }, + } + } + + // Now pop the first future block and add it as a leaf + let claim_info = if let Some(new_leaf) = self.future_blocks.pop_front() { + ClaimInfo { + hash: Some(*hash), + claim: claim_queue.first().copied(), + claim_queue_len: claim_queue.len(), + claimed: new_leaf.claimed, + } + } else { + // maybe the claim queue was empty but we still need to add a leaf + ClaimInfo { + hash: Some(*hash), + claim: claim_queue.first().copied(), + claim_queue_len: claim_queue.len(), + claimed: false, + } + }; + + // `future_blocks` can't be longer than the length of the claim queue at the last block + // minus 1. For example this can happen if at relay block N we have got a claim queue of + // length 4 and it has shrunk to 2. + self.future_blocks.truncate(claim_queue.len().saturating_sub(1)); + + self.block_state.push_back(claim_info); + } + + fn get_window<'a>( + &'a mut self, + relay_parent: &'a Hash, + ) -> impl Iterator<Item = &'a mut ClaimInfo> + 'a { + let mut window = self + .block_state + .iter_mut() + .skip_while(|b| b.hash != Some(*relay_parent)) + .peekable(); + let cq_len = window.peek().map_or(0, |b| b.claim_queue_len); + window.chain(self.future_blocks.iter_mut()).take(cq_len) + } + + pub(crate) fn claim_at(&mut self, relay_parent: &Hash, para_id: &ParaId) -> bool { + gum::trace!( + target: LOG_TARGET, + ?para_id, + ?relay_parent, + "claim_at" + ); + self.find_a_claim(relay_parent, para_id, true) + } + + pub(crate) fn can_claim_at(&mut self, relay_parent: &Hash, para_id: &ParaId) -> bool { + gum::trace!( + target: LOG_TARGET, + ?para_id, + ?relay_parent, + "can_claim_at" + ); + + self.find_a_claim(relay_parent, para_id, false) + } + + // Returns `true` if there is a claim within `relay_parent`'s view of the claim queue for + // `para_id`. If `claim_it` is set to `true` the slot is claimed. Otherwise the function just + // reports the availability of the slot.
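+ // For example, with claim queue [A, B, A] at `relay_parent` and nothing claimed yet, + // calling this with para B and `claim_it = false` reports `true` and leaves the state + // untouched, while `claim_it = true` would also mark the `B` slot as claimed.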
+ fn find_a_claim(&mut self, relay_parent: &Hash, para_id: &ParaId, claim_it: bool) -> bool { + let window = self.get_window(relay_parent); + + for w in window { + gum::trace!( + target: LOG_TARGET, + ?para_id, + ?relay_parent, + claim_info=?w, + ?claim_it, + "Checking claim" + ); + + if !w.claimed && w.claim == Some(*para_id) { + w.claimed = claim_it; + return true + } + } + + false + } + + pub(crate) fn unclaimed_at(&mut self, relay_parent: &Hash) -> Vec<ParaId> { + let window = self.get_window(relay_parent); + + window.filter(|b| !b.claimed).filter_map(|b| b.claim).collect() + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn sane_initial_state() { + let mut state = ClaimQueueState::new(); + let relay_parent = Hash::from_low_u64_be(1); + let para_id = ParaId::new(1); + + assert!(!state.can_claim_at(&relay_parent, &para_id)); + assert!(!state.claim_at(&relay_parent, &para_id)); + assert_eq!(state.unclaimed_at(&relay_parent), vec![]); + } + + #[test] + fn add_leaf_works() { + let mut state = ClaimQueueState::new(); + let relay_parent_a = Hash::from_low_u64_be(1); + let para_id = ParaId::new(1); + let claim_queue = vec![para_id, para_id, para_id]; + + state.add_leaf(&relay_parent_a, &claim_queue); + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id, para_id, para_id]); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id), + claim_queue_len: 3, + claimed: false, + },]) + ); + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ + ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: false }, + ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: false } + ]) + ); + + // should be no op + state.add_leaf(&relay_parent_a, &claim_queue); + assert_eq!(state.block_state.len(), 1); + assert_eq!(state.future_blocks.len(), 2); + + // add another leaf + let relay_parent_b = Hash::from_low_u64_be(2); + state.add_leaf(&relay_parent_b, &claim_queue); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ + ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id), + claim_queue_len: 3, + claimed: false, + }, + ClaimInfo { + hash: Some(relay_parent_b), + claim: Some(para_id), + claim_queue_len: 3, + claimed: false, + } + ]) + ); + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ + ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: false }, + ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: false } + ]) + ); + + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id, para_id, para_id]); + assert_eq!(state.unclaimed_at(&relay_parent_b), vec![para_id, para_id, para_id]); + } + + #[test] + fn claims_at_separate_relay_parents_work() { + let mut state = ClaimQueueState::new(); + let relay_parent_a = Hash::from_low_u64_be(1); + let relay_parent_b = Hash::from_low_u64_be(2); + let para_id = ParaId::new(1); + let claim_queue = vec![para_id, para_id, para_id]; + + state.add_leaf(&relay_parent_a, &claim_queue); + state.add_leaf(&relay_parent_b, &claim_queue); + + // add one claim for a + assert!(state.can_claim_at(&relay_parent_a, &para_id)); + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id, para_id, para_id]); + assert!(state.claim_at(&relay_parent_a, &para_id)); + + // and one for b + assert!(state.can_claim_at(&relay_parent_b, &para_id)); + assert_eq!(state.unclaimed_at(&relay_parent_b), vec![para_id, para_id, para_id]); + assert!(state.claim_at(&relay_parent_b, &para_id)); + + // a should have one claim since
the one for b was claimed + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id]); + // and two more for b + assert_eq!(state.unclaimed_at(&relay_parent_b), vec![para_id, para_id]); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ + ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id), + claim_queue_len: 3, + claimed: true, + }, + ClaimInfo { + hash: Some(relay_parent_b), + claim: Some(para_id), + claim_queue_len: 3, + claimed: true, + } + ]) + ); + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ + ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: false }, + ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: false } + ]) + ); + } + + #[test] + fn claims_are_transferred_to_next_slot() { + let mut state = ClaimQueueState::new(); + let relay_parent_a = Hash::from_low_u64_be(1); + let para_id = ParaId::new(1); + let claim_queue = vec![para_id, para_id, para_id]; + + state.add_leaf(&relay_parent_a, &claim_queue); + + // add two claims, 2nd should be transferred to a new leaf + assert!(state.can_claim_at(&relay_parent_a, &para_id)); + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id, para_id, para_id]); + assert!(state.claim_at(&relay_parent_a, &para_id)); + + assert!(state.can_claim_at(&relay_parent_a, &para_id)); + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id, para_id]); + assert!(state.claim_at(&relay_parent_a, &para_id)); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id), + claim_queue_len: 3, + claimed: true, + },]) + ); + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ + ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: true }, + ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: false } + ]) + ); + + // one more + assert!(state.can_claim_at(&relay_parent_a, &para_id)); + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id]); + assert!(state.claim_at(&relay_parent_a, &para_id)); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id), + claim_queue_len: 3, + claimed: true, + },]) + ); + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ + ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: true }, + ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: true } + ]) + ); + + // no more claims + assert!(!state.can_claim_at(&relay_parent_a, &para_id)); + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![]); + } + + #[test] + fn claims_are_transferred_to_new_leaves() { + let mut state = ClaimQueueState::new(); + let relay_parent_a = Hash::from_low_u64_be(1); + let para_id = ParaId::new(1); + let claim_queue = vec![para_id, para_id, para_id]; + + state.add_leaf(&relay_parent_a, &claim_queue); + + for _ in 0..3 { + assert!(state.can_claim_at(&relay_parent_a, &para_id)); + assert!(state.claim_at(&relay_parent_a, &para_id)); + } + + assert_eq!( + state.block_state, + VecDeque::from(vec![ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id), + claim_queue_len: 3, + claimed: true, + },]) + ); + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ + ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: true }, + ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: true } + ]) + ); + + // no more claims + assert!(!state.can_claim_at(&relay_parent_a, &para_id)); + + // new leaf + let relay_parent_b =
Hash::from_low_u64_be(2); + state.add_leaf(&relay_parent_b, &claim_queue); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ + ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id), + claim_queue_len: 3, + claimed: true, + }, + ClaimInfo { + hash: Some(relay_parent_b), + claim: Some(para_id), + claim_queue_len: 3, + claimed: true, + } + ]) + ); + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ + ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: true }, + ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: false } + ]) + ); + + // still no claims for a + assert!(!state.can_claim_at(&relay_parent_a, &para_id)); + + // but can accept for b + assert!(state.can_claim_at(&relay_parent_b, &para_id)); + assert!(state.claim_at(&relay_parent_b, &para_id)); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ + ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id), + claim_queue_len: 3, + claimed: true, + }, + ClaimInfo { + hash: Some(relay_parent_b), + claim: Some(para_id), + claim_queue_len: 3, + claimed: true, + } + ]) + ); + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ + ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: true }, + ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: true } + ]) + ); + } + + #[test] + fn two_paras() { + let mut state = ClaimQueueState::new(); + let relay_parent_a = Hash::from_low_u64_be(1); + let para_id_a = ParaId::new(1); + let para_id_b = ParaId::new(2); + let claim_queue = vec![para_id_a, para_id_b, para_id_a]; + + state.add_leaf(&relay_parent_a, &claim_queue); + assert!(state.can_claim_at(&relay_parent_a, &para_id_a)); + assert!(state.can_claim_at(&relay_parent_a, &para_id_b)); + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id_a, para_id_b, para_id_a]); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id_a), + claim_queue_len: 3, + claimed: false, + },]) + ); + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ + ClaimInfo { + hash: None, + claim: Some(para_id_b), + claim_queue_len: 1, + claimed: false + }, + ClaimInfo { + hash: None, + claim: Some(para_id_a), + claim_queue_len: 1, + claimed: false + } + ]) + ); + + assert!(state.claim_at(&relay_parent_a, &para_id_a)); + assert!(state.can_claim_at(&relay_parent_a, &para_id_a)); + assert!(state.can_claim_at(&relay_parent_a, &para_id_b)); + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id_b, para_id_a]); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id_a), + claim_queue_len: 3, + claimed: true, + },]) + ); + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ + ClaimInfo { + hash: None, + claim: Some(para_id_b), + claim_queue_len: 1, + claimed: false + }, + ClaimInfo { + hash: None, + claim: Some(para_id_a), + claim_queue_len: 1, + claimed: false + } + ]) + ); + + assert!(state.claim_at(&relay_parent_a, &para_id_a)); + assert!(!state.can_claim_at(&relay_parent_a, &para_id_a)); + assert!(state.can_claim_at(&relay_parent_a, &para_id_b)); + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id_b]); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id_a), + claim_queue_len: 3, + claimed: true, + },]) + ); + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ + ClaimInfo { + hash: None, + claim: Some(para_id_b), + claim_queue_len: 1, + claimed:
false + }, + ClaimInfo { hash: None, claim: Some(para_id_a), claim_queue_len: 1, claimed: true } + ]) + ); + + assert!(state.claim_at(&relay_parent_a, &para_id_b)); + assert!(!state.can_claim_at(&relay_parent_a, &para_id_a)); + assert!(!state.can_claim_at(&relay_parent_a, &para_id_b)); + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![]); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id_a), + claim_queue_len: 3, + claimed: true, + },]) + ); + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ + ClaimInfo { hash: None, claim: Some(para_id_b), claim_queue_len: 1, claimed: true }, + ClaimInfo { hash: None, claim: Some(para_id_a), claim_queue_len: 1, claimed: true } + ]) + ); + } + + #[test] + fn claim_queue_changes_unexpectedly() { + let mut state = ClaimQueueState::new(); + let relay_parent_a = Hash::from_low_u64_be(1); + let para_id_a = ParaId::new(1); + let para_id_b = ParaId::new(2); + let claim_queue_a = vec![para_id_a, para_id_b, para_id_a]; + + state.add_leaf(&relay_parent_a, &claim_queue_a); + assert!(state.can_claim_at(&relay_parent_a, &para_id_a)); + assert!(state.can_claim_at(&relay_parent_a, &para_id_b)); + assert!(state.claim_at(&relay_parent_a, &para_id_a)); + assert!(state.claim_at(&relay_parent_a, &para_id_a)); + assert!(state.claim_at(&relay_parent_a, &para_id_b)); + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![]); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id_a), + claim_queue_len: 3, + claimed: true, + },]) + ); + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ + ClaimInfo { hash: None, claim: Some(para_id_b), claim_queue_len: 1, claimed: true }, + ClaimInfo { hash: None, claim: Some(para_id_a), claim_queue_len: 1, claimed: true } + ]) + ); + + let relay_parent_b = Hash::from_low_u64_be(2); + let claim_queue_b = vec![para_id_a, para_id_a, para_id_a]; // should be [b, a, ...]
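+ // This simulates an unexpected scheduler change: the projection at relay parent a + // expected relay parent b to start with para B, so the claim made for B above can no + // longer be honored.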
+ state.add_leaf(&relay_parent_b, &claim_queue_b); + + // because of the unexpected change in claim queue we lost the claim for paraB and have one + // unclaimed for paraA + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id_a]); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ + ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id_a), + claim_queue_len: 3, + claimed: true, + }, + ClaimInfo { + hash: Some(relay_parent_b), + claim: Some(para_id_a), + claim_queue_len: 3, + claimed: false, + } + ]) + ); + assert_eq!( + state.future_blocks, + // since the 3rd slot of the claim queue at rp1 is equal to the second one in rp2, this + // claim still exists + VecDeque::from(vec![ + ClaimInfo { hash: None, claim: Some(para_id_a), claim_queue_len: 1, claimed: true }, + ClaimInfo { + hash: None, + claim: Some(para_id_a), + claim_queue_len: 1, + claimed: false + } + ]) + ); + } + + #[test] + fn claim_queue_changes_unexpectedly_with_two_blocks() { + let mut state = ClaimQueueState::new(); + let relay_parent_a = Hash::from_low_u64_be(1); + let para_id_a = ParaId::new(1); + let para_id_b = ParaId::new(2); + let claim_queue_a = vec![para_id_a, para_id_b, para_id_b]; + + state.add_leaf(&relay_parent_a, &claim_queue_a); + assert!(state.can_claim_at(&relay_parent_a, &para_id_a)); + assert!(state.can_claim_at(&relay_parent_a, &para_id_b)); + assert!(state.claim_at(&relay_parent_a, &para_id_a)); + assert!(state.claim_at(&relay_parent_a, &para_id_b)); + assert!(state.claim_at(&relay_parent_a, &para_id_b)); + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![]); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id_a), + claim_queue_len: 3, + claimed: true, + },]) + ); + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ + ClaimInfo { hash: None, claim: Some(para_id_b), claim_queue_len: 1, claimed: true }, + ClaimInfo { hash: None, claim: Some(para_id_b), claim_queue_len: 1, claimed: true } + ]) + ); + + let relay_parent_b = Hash::from_low_u64_be(2); + let claim_queue_b = vec![para_id_a, para_id_a, para_id_a]; // should be [b, b, ...]
+ state.add_leaf(&relay_parent_b, &claim_queue_b); + + // because of the unexpected change in claim queue we lost both claims for paraB and have + // two unclaimed for paraA + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id_a, para_id_a]); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ + ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id_a), + claim_queue_len: 3, + claimed: true, + }, + ClaimInfo { + hash: Some(relay_parent_b), + claim: Some(para_id_a), + claim_queue_len: 3, + claimed: false, + } + ]) + ); + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ + ClaimInfo { + hash: None, + claim: Some(para_id_a), + claim_queue_len: 1, + claimed: false + }, + ClaimInfo { + hash: None, + claim: Some(para_id_a), + claim_queue_len: 1, + claimed: false + } + ]) + ); + } + + #[test] + fn empty_claim_queue() { + let mut state = ClaimQueueState::new(); + let relay_parent_a = Hash::from_low_u64_be(1); + let para_id_a = ParaId::new(1); + let claim_queue_a = vec![]; + + state.add_leaf(&relay_parent_a, &claim_queue_a); + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![]); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ClaimInfo { + hash: Some(relay_parent_a), + claim: None, + claim_queue_len: 0, + claimed: false, + },]) + ); + // no claim queue so we know nothing about future blocks + assert!(state.future_blocks.is_empty()); + + assert!(!state.can_claim_at(&relay_parent_a, &para_id_a)); + assert!(!state.claim_at(&relay_parent_a, &para_id_a)); + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![]); + + let relay_parent_b = Hash::from_low_u64_be(2); + let claim_queue_b = vec![para_id_a]; + state.add_leaf(&relay_parent_b, &claim_queue_b); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ + ClaimInfo { + hash: Some(relay_parent_a), + claim: None, + claim_queue_len: 0, + claimed: false, + }, + ClaimInfo { + hash: Some(relay_parent_b), + claim: Some(para_id_a), + claim_queue_len: 1, + claimed: false, + }, + ]) + ); + // claim queue with length 1 doesn't say anything about future blocks + assert!(state.future_blocks.is_empty()); + + assert!(!state.can_claim_at(&relay_parent_a, &para_id_a)); + assert!(!state.claim_at(&relay_parent_a, &para_id_a)); + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![]); + + assert!(state.can_claim_at(&relay_parent_b, &para_id_a)); + assert_eq!(state.unclaimed_at(&relay_parent_b), vec![para_id_a]); + assert!(state.claim_at(&relay_parent_b, &para_id_a)); + + let relay_parent_c = Hash::from_low_u64_be(3); + let claim_queue_c = vec![para_id_a, para_id_a]; + state.add_leaf(&relay_parent_c, &claim_queue_c); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ + ClaimInfo { + hash: Some(relay_parent_a), + claim: None, + claim_queue_len: 0, + claimed: false, + }, + ClaimInfo { + hash: Some(relay_parent_b), + claim: Some(para_id_a), + claim_queue_len: 1, + claimed: true, + }, + ClaimInfo { + hash: Some(relay_parent_c), + claim: Some(para_id_a), + claim_queue_len: 2, + claimed: false, + }, + ]) + ); + // claim queue with length 2 fills only one future block + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ClaimInfo { + hash: None, + claim: Some(para_id_a), + claim_queue_len: 1, + claimed: false, + },]) + ); + + assert!(!state.can_claim_at(&relay_parent_a, &para_id_a)); + assert!(!state.claim_at(&relay_parent_a, &para_id_a)); + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![]); + + // already claimed + assert!(!state.can_claim_at(&relay_parent_b, &para_id_a)); + assert_eq!(state.unclaimed_at(&relay_parent_b), vec![]); +
assert!(!state.claim_at(&relay_parent_b, &para_id_a)); + + assert!(state.can_claim_at(&relay_parent_c, &para_id_a)); + assert_eq!(state.unclaimed_at(&relay_parent_c), vec![para_id_a, para_id_a]); + } + + #[test] + fn claim_queue_becomes_shorter() { + let mut state = ClaimQueueState::new(); + let relay_parent_a = Hash::from_low_u64_be(1); + let para_id_a = ParaId::new(1); + let para_id_b = ParaId::new(2); + let claim_queue_a = vec![para_id_a, para_id_b, para_id_a]; + + state.add_leaf(&relay_parent_a, &claim_queue_a); + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id_a, para_id_b, para_id_a]); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id_a), + claim_queue_len: 3, + claimed: false, + },]) + ); + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ + ClaimInfo { + hash: None, + claim: Some(para_id_b), + claim_queue_len: 1, + claimed: false + }, + ClaimInfo { + hash: None, + claim: Some(para_id_a), + claim_queue_len: 1, + claimed: false + } + ]) + ); + + let relay_parent_b = Hash::from_low_u64_be(2); + let claim_queue_b = vec![para_id_a, para_id_b]; // should be [b, a] + state.add_leaf(&relay_parent_b, &claim_queue_b); + + assert_eq!(state.unclaimed_at(&relay_parent_b), vec![para_id_a, para_id_b]); + // claims for `relay_parent_a` have changed. + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id_a, para_id_a, para_id_b]); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ + ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id_a), + claim_queue_len: 3, + claimed: false, + }, + ClaimInfo { + hash: Some(relay_parent_b), + claim: Some(para_id_a), + claim_queue_len: 2, + claimed: false, + } + ]) + ); + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ClaimInfo { + hash: None, + claim: Some(para_id_b), + claim_queue_len: 1, + claimed: false + },]) + ); + } + + #[test] + fn claim_queue_becomes_shorter_and_drops_future_claims() { + let mut state = ClaimQueueState::new(); + let relay_parent_a = Hash::from_low_u64_be(1); + let para_id_a = ParaId::new(1); + let para_id_b = ParaId::new(2); + let claim_queue_a = vec![para_id_a, para_id_b, para_id_a, para_id_b]; + + state.add_leaf(&relay_parent_a, &claim_queue_a); + + assert_eq!( + state.unclaimed_at(&relay_parent_a), + vec![para_id_a, para_id_b, para_id_a, para_id_b] + ); + + // We start with a claim queue of length 4.
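+ // (`future_blocks` holds at most claim_queue_len - 1 entries, hence the three future + // claims asserted below.)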
+ assert_eq!( + state.block_state, + VecDeque::from(vec![ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id_a), + claim_queue_len: 4, + claimed: false, + },]) + ); + // we have got three future blocks + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ + ClaimInfo { + hash: None, + claim: Some(para_id_b), + claim_queue_len: 1, + claimed: false + }, + ClaimInfo { + hash: None, + claim: Some(para_id_a), + claim_queue_len: 1, + claimed: false + }, + ClaimInfo { + hash: None, + claim: Some(para_id_b), + claim_queue_len: 1, + claimed: false + } + ]) + ); + + // The next claim queue length is 2, so we lose one future block + let relay_parent_b = Hash::from_low_u64_be(2); + let para_id_a = ParaId::new(1); + let para_id_b = ParaId::new(2); + let claim_queue_b = vec![para_id_b, para_id_a]; + state.add_leaf(&relay_parent_b, &claim_queue_b); + + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id_a, para_id_b, para_id_a]); + assert_eq!(state.unclaimed_at(&relay_parent_b), vec![para_id_b, para_id_a]); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ + ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id_a), + claim_queue_len: 4, + claimed: false, + }, + ClaimInfo { + hash: Some(relay_parent_b), + claim: Some(para_id_b), + claim_queue_len: 2, + claimed: false, + } + ]) + ); + + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ClaimInfo { + hash: None, + claim: Some(para_id_a), + claim_queue_len: 1, + claimed: false + },]) + ); + } +} diff --git a/polkadot/node/network/collator-protocol/src/validator_side/collation.rs b/polkadot/node/network/collator-protocol/src/validator_side/collation.rs index cc0de1cb70f6..625140a73966 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/collation.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/collation.rs @@ -18,16 +18,28 @@ //! //! Usually a path of collations is as follows: //! 1. First, collation must be advertised by collator. -//! 2. If the advertisement was accepted, it's queued for fetch (per relay parent). -//! 3. Once it's requested, the collation is said to be Pending. -//! 4. Pending collation becomes Fetched once received, we send it to backing for validation. -//! 5. If it turns to be invalid or async backing allows seconding another candidate, carry on +//! 2. The validator inspects the claim queue and decides if the collation should be fetched +//! based on the entries there. A parachain can't have more fetched collations than there are +//! entries in the claim queue at a specific relay parent. When calculating this limit, the +//! validator counts all advertisements within its view, not just those at the relay parent. +//! 3. If the advertisement was accepted, it's queued for fetch (per relay parent). +//! 4. Once it's requested, the collation is said to be pending fetch +//! (`CollationStatus::Fetching`). +//! 5. Pending fetch collation becomes pending validation +//! (`CollationStatus::WaitingOnValidation`) once received; we send it to backing for +//! validation. +//! 6. If it turns out to be invalid or async backing allows seconding another candidate, carry on //! with the next advertisement, otherwise we're done with this relay parent. //! -//! ┌──────────────────────────────────────────┐ -//! └─▶Advertised ─▶ Pending ─▶ Fetched ─▶ Validated - -use std::{collections::VecDeque, future::Future, pin::Pin, task::Poll}; +//! ┌───────────────────────────────────┐ +//!
+
+use std::{
+	collections::{BTreeMap, VecDeque},
+	future::Future,
+	pin::Pin,
+	task::Poll,
+};
 
 use futures::{future::BoxFuture, FutureExt};
 use polkadot_node_network_protocol::{
@@ -36,9 +48,7 @@ use polkadot_node_network_protocol::{
 	PeerId,
 };
 use polkadot_node_primitives::PoV;
-use polkadot_node_subsystem_util::{
-	metrics::prometheus::prometheus::HistogramTimer, runtime::ProspectiveParachainsMode,
-};
+use polkadot_node_subsystem_util::metrics::prometheus::prometheus::HistogramTimer;
 use polkadot_primitives::{
 	vstaging::CandidateReceiptV2 as CandidateReceipt, CandidateHash, CollatorId, Hash, HeadData,
 	Id as ParaId, PersistedValidationData,
@@ -187,12 +197,10 @@ pub struct PendingCollationFetch {
 pub enum CollationStatus {
 	/// We are waiting for a collation to be advertised to us.
 	Waiting,
-	/// We are currently fetching a collation.
-	Fetching,
+	/// We are currently fetching a collation for the specified `ParaId`.
+	Fetching(ParaId),
 	/// We are waiting that a collation is being validated.
 	WaitingOnValidation,
-	/// We have seconded a collation.
-	Seconded,
 }
 
 impl Default for CollationStatus {
@@ -202,22 +210,22 @@ impl Default for CollationStatus {
 }
 
 impl CollationStatus {
-	/// Downgrades to `Waiting`, but only if `self != Seconded`.
-	fn back_to_waiting(&mut self, relay_parent_mode: ProspectiveParachainsMode) {
-		match self {
-			Self::Seconded =>
-				if relay_parent_mode.is_enabled() {
-					// With async backing enabled it's allowed to
-					// second more candidates.
-					*self = Self::Waiting
-				},
-			_ => *self = Self::Waiting,
-		}
+	/// Downgrades to `Waiting`.
+	pub fn back_to_waiting(&mut self) {
+		*self = Self::Waiting
 	}
 }
 
+/// The number of claims in the claim queue and the number of seconded candidates for a specific
+/// `ParaId`.
+#[derive(Default, Debug)]
+struct CandidatesStatePerPara {
+	/// How many collations have been seconded.
+	pub seconded_per_para: usize,
+	// Claims in the claim queue for the `ParaId`.
+	pub claims_per_para: usize,
+}
+
 /// Information about collations per relay parent.
-#[derive(Default)]
 pub struct Collations {
 	/// What is the current status in regards to a collation for this relay parent?
 	pub status: CollationStatus,
@@ -226,75 +234,89 @@ pub struct Collations {
 	/// This is the currently last started fetch, which did not exceed `MAX_UNSHARED_DOWNLOAD_TIME`
 	/// yet.
 	pub fetching_from: Option<(CollatorId, Option<CandidateHash>)>,
-	/// Collation that were advertised to us, but we did not yet fetch.
-	pub waiting_queue: VecDeque<(PendingCollation, CollatorId)>,
-	/// How many collations have been seconded.
-	pub seconded_count: usize,
+	/// Collations that were advertised to us, but we did not yet request or fetch. Grouped by
+	/// `ParaId`.
+	waiting_queue: BTreeMap<ParaId, VecDeque<(PendingCollation, CollatorId)>>,
+	/// Number of seconded candidates and claims in the claim queue per `ParaId`.
+	candidates_state: BTreeMap<ParaId, CandidatesStatePerPara>,
 }
 
 impl Collations {
+	pub(super) fn new(group_assignments: &Vec<ParaId>) -> Self {
+		let mut candidates_state = BTreeMap::<ParaId, CandidatesStatePerPara>::new();
+
+		for para_id in group_assignments {
+			candidates_state.entry(*para_id).or_default().claims_per_para += 1;
+		}
+
+		Self {
+			status: Default::default(),
+			fetching_from: None,
+			waiting_queue: Default::default(),
+			candidates_state,
+		}
+	}
+
 	/// Note a seconded collation for a given para.
-	pub(super) fn note_seconded(&mut self) {
-		self.seconded_count += 1
+	pub(super) fn note_seconded(&mut self, para_id: ParaId) {
+		self.candidates_state.entry(para_id).or_default().seconded_per_para += 1;
+		gum::trace!(
+			target: LOG_TARGET,
+			?para_id,
+			new_count=self.candidates_state.entry(para_id).or_default().seconded_per_para,
+			"Note seconded."
+		);
+		self.status.back_to_waiting();
 	}
 
-	/// Returns the next collation to fetch from the `waiting_queue`.
+	/// Adds a new collation to the waiting queue for the relay parent. This function doesn't
+	/// perform any limits check. The caller should ensure that the collation limit is respected.
+	pub(super) fn add_to_waiting_queue(&mut self, collation: (PendingCollation, CollatorId)) {
+		self.waiting_queue.entry(collation.0.para_id).or_default().push_back(collation);
+	}
+
+	/// Picks a collation to fetch from the waiting queue.
+	/// When fetching collations we need to ensure that each parachain has got a fair core time
+	/// share depending on its assignments in the claim queue. This means that the number of
+	/// collations seconded per parachain should ideally be equal to the number of claims for the
+	/// particular parachain in the claim queue.
 	///
-	/// This will reset the status back to `Waiting` using [`CollationStatus::back_to_waiting`].
+	/// To achieve this, each seconded collation is mapped to an entry from the claim queue. The
+	/// next fetch is the first unfulfilled entry from the claim queue for which there is an
+	/// advertisement.
 	///
-	/// Returns `Some(_)` if there is any collation to fetch, the `status` is not `Seconded` and
-	/// the passed in `finished_one` is the currently `waiting_collation`.
-	pub(super) fn get_next_collation_to_fetch(
+	/// `unfulfilled_claim_queue_entries` represents all claim queue entries which are still not
+	/// fulfilled.
+	pub(super) fn pick_a_collation_to_fetch(
 		&mut self,
-		finished_one: &(CollatorId, Option<CandidateHash>),
-		relay_parent_mode: ProspectiveParachainsMode,
+		unfulfilled_claim_queue_entries: Vec<ParaId>,
 	) -> Option<(PendingCollation, CollatorId)> {
-		// If finished one does not match waiting_collation, then we already dequeued another fetch
-		// to replace it.
-		if let Some((collator_id, maybe_candidate_hash)) = self.fetching_from.as_ref() {
-			// If a candidate hash was saved previously, `finished_one` must include this too.
-			if collator_id != &finished_one.0 &&
-				maybe_candidate_hash.map_or(true, |hash| Some(&hash) != finished_one.1.as_ref())
+		gum::trace!(
+			target: LOG_TARGET,
+			waiting_queue=?self.waiting_queue,
+			candidates_state=?self.candidates_state,
+			"Pick a collation to fetch."
+		);
+
+		for assignment in unfulfilled_claim_queue_entries {
+			// If there is an unfulfilled assignment - return it.
+			if let Some(collation) = self
+				.waiting_queue
+				.get_mut(&assignment)
+				.and_then(|collations| collations.pop_front())
 			{
-				gum::trace!(
-					target: LOG_TARGET,
-					waiting_collation = ?self.fetching_from,
-					?finished_one,
-					"Not proceeding to the next collation - has already been done."
-				);
-				return None
+				return Some(collation)
 			}
 		}
-		self.status.back_to_waiting(relay_parent_mode);
-
-		match self.status {
-			// We don't need to fetch any other collation when we already have seconded one.
-			CollationStatus::Seconded => None,
-			CollationStatus::Waiting =>
-				if self.is_seconded_limit_reached(relay_parent_mode) {
-					None
-				} else {
-					self.waiting_queue.pop_front()
-				},
-			CollationStatus::WaitingOnValidation | CollationStatus::Fetching =>
-				unreachable!("We have reset the status above!"),
-		}
+
+		None
 	}
 
-	/// Checks the limit of seconded candidates.
-	pub(super) fn is_seconded_limit_reached(
-		&self,
-		relay_parent_mode: ProspectiveParachainsMode,
-	) -> bool {
-		let seconded_limit =
-			if let ProspectiveParachainsMode::Enabled { max_candidate_depth, .. } =
-				relay_parent_mode
-			{
-				max_candidate_depth + 1
-			} else {
-				1
-			};
-		self.seconded_count >= seconded_limit
+	pub(super) fn seconded_for_para(&self, para_id: &ParaId) -> usize {
+		self.candidates_state
			.get(&para_id)
+			.map(|state| state.seconded_per_para)
+			.unwrap_or_default()
 	}
 }
diff --git a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs
index 86358f503d04..5f5effcde9a8 100644
--- a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs
+++ b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs
@@ -49,22 +49,25 @@ use polkadot_node_subsystem::{
 use polkadot_node_subsystem_util::{
 	backing_implicit_view::View as ImplicitView,
 	reputation::{ReputationAggregator, REPUTATION_CHANGE_INTERVAL},
-	request_claim_queue, request_session_index_for_child,
-	runtime::{prospective_parachains_mode, request_node_features, ProspectiveParachainsMode},
+	request_async_backing_params, request_claim_queue, request_session_index_for_child,
+	runtime::{recv_runtime, request_node_features},
 };
 use polkadot_primitives::{
 	node_features,
-	vstaging::{CandidateDescriptorV2, CandidateDescriptorVersion, CoreState},
-	CandidateHash, CollatorId, CoreIndex, Hash, HeadData, Id as ParaId, OccupiedCoreAssumption,
-	PersistedValidationData, SessionIndex,
+	vstaging::{CandidateDescriptorV2, CandidateDescriptorVersion},
+	AsyncBackingParams, CandidateHash, CollatorId, CoreIndex, Hash, HeadData, Id as ParaId,
+	OccupiedCoreAssumption, PersistedValidationData, SessionIndex,
 };
 
 use crate::error::{Error, FetchError, Result, SecondingError};
 
 use self::collation::BlockedCollationId;
+use self::claim_queue_state::ClaimQueueState;
+
 use super::{modify_reputation, tick_stream, LOG_TARGET};
 
+mod claim_queue_state;
 mod collation;
 mod metrics;
 
@@ -163,27 +166,19 @@ impl PeerData {
 	fn update_view(
 		&mut self,
 		implicit_view: &ImplicitView,
-		active_leaves: &HashMap<Hash, ProspectiveParachainsMode>,
-		per_relay_parent: &HashMap<Hash, PerRelayParent>,
+		active_leaves: &HashMap<Hash, AsyncBackingParams>,
 		new_view: View,
 	) {
 		let old_view = std::mem::replace(&mut self.view, new_view);
 		if let PeerState::Collating(ref mut peer_state) = self.state {
 			for removed in old_view.difference(&self.view) {
-				// Remove relay parent advertisements if it went out
-				// of our (implicit) view.
-				let keep = per_relay_parent
-					.get(removed)
-					.map(|s| {
-						is_relay_parent_in_implicit_view(
-							removed,
-							s.prospective_parachains_mode,
-							implicit_view,
-							active_leaves,
-							peer_state.para_id,
-						)
-					})
-					.unwrap_or(false);
+				// Remove relay parent advertisements if it went out of our (implicit) view.
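+				// (A relay parent counts as in-view while it is within the allowed
+				// ancestry of one of our active leaves for the collator's para; see
+				// `is_relay_parent_in_implicit_view` below.)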
+				let keep = is_relay_parent_in_implicit_view(
+					removed,
+					implicit_view,
+					active_leaves,
+					peer_state.para_id,
+				);
 
 				if !keep {
 					peer_state.advertisements.remove(&removed);
@@ -196,8 +191,7 @@ impl PeerData {
 	fn prune_old_advertisements(
 		&mut self,
 		implicit_view: &ImplicitView,
-		active_leaves: &HashMap<Hash, ProspectiveParachainsMode>,
-		per_relay_parent: &HashMap<Hash, PerRelayParent>,
+		active_leaves: &HashMap<Hash, AsyncBackingParams>,
 	) {
 		if let PeerState::Collating(ref mut peer_state) = self.state {
 			peer_state.advertisements.retain(|hash, _| {
 				// - Relay parent is an active leaf
 				// - It belongs to allowed ancestry under some leaf
 				// Discard otherwise.
-				per_relay_parent.get(hash).map_or(false, |s| {
-					is_relay_parent_in_implicit_view(
-						hash,
-						s.prospective_parachains_mode,
-						implicit_view,
-						active_leaves,
-						peer_state.para_id,
-					)
-				})
+				is_relay_parent_in_implicit_view(
+					hash,
+					implicit_view,
+					active_leaves,
+					peer_state.para_id,
+				)
 			});
 		}
 	}
 
-	/// Note an advertisement by the collator. Returns `true` if the advertisement was imported
-	/// successfully. Fails if the advertisement is duplicate, out of view, or the peer has not
-	/// declared itself a collator.
+	/// Performs sanity check for an advertisement and notes it as advertised.
 	fn insert_advertisement(
 		&mut self,
 		on_relay_parent: Hash,
-		relay_parent_mode: ProspectiveParachainsMode,
 		candidate_hash: Option<CandidateHash>,
 		implicit_view: &ImplicitView,
-		active_leaves: &HashMap<Hash, ProspectiveParachainsMode>,
+		active_leaves: &HashMap<Hash, AsyncBackingParams>,
+		per_relay_parent: &PerRelayParent,
 	) -> std::result::Result<(CollatorId, ParaId), InsertAdvertisementError> {
 		match self.state {
 			PeerState::Connected(_) => Err(InsertAdvertisementError::UndeclaredCollator),
 			PeerState::Collating(ref mut state) => {
 				if !is_relay_parent_in_implicit_view(
 					&on_relay_parent,
-					relay_parent_mode,
 					implicit_view,
 					active_leaves,
 					state.para_id,
@@ -242,53 +230,41 @@ impl PeerData {
 					return Err(InsertAdvertisementError::OutOfOurView)
 				}
 
-				match (relay_parent_mode, candidate_hash) {
-					(ProspectiveParachainsMode::Disabled, candidate_hash) => {
-						if state.advertisements.contains_key(&on_relay_parent) {
-							return Err(InsertAdvertisementError::Duplicate)
-						}
-						state
-							.advertisements
-							.insert(on_relay_parent, HashSet::from_iter(candidate_hash));
-					},
-					(
-						ProspectiveParachainsMode::Enabled { max_candidate_depth, .. },
-						candidate_hash,
-					) => {
-						if let Some(candidate_hash) = candidate_hash {
-							if state
-								.advertisements
-								.get(&on_relay_parent)
-								.map_or(false, |candidates| candidates.contains(&candidate_hash))
-							{
-								return Err(InsertAdvertisementError::Duplicate)
-							}
-
-							let candidates =
-								state.advertisements.entry(on_relay_parent).or_default();
-
-							if candidates.len() > max_candidate_depth {
-								return Err(InsertAdvertisementError::PeerLimitReached)
-							}
-							candidates.insert(candidate_hash);
-						} else {
-							if self.version != CollationVersion::V1 {
-								gum::error!(
-									target: LOG_TARGET,
-									"Programming error, `candidate_hash` can not be `None` \
-									for non `V1` networking.",
-								);
-							}
-
-							if state.advertisements.contains_key(&on_relay_parent) {
-								return Err(InsertAdvertisementError::Duplicate)
-							}
-							state
-								.advertisements
-								.insert(on_relay_parent, HashSet::from_iter(candidate_hash));
-						};
-					},
-				}
+				if let Some(candidate_hash) = candidate_hash {
+					if state
+						.advertisements
+						.get(&on_relay_parent)
+						.map_or(false, |candidates| candidates.contains(&candidate_hash))
+					{
+						return Err(InsertAdvertisementError::Duplicate)
+					}
+
+					let candidates = state.advertisements.entry(on_relay_parent).or_default();
+
+					// The number of current assignments equals the length of the claim queue. No
+					// honest collator should send that many advertisements.
+					if candidates.len() > per_relay_parent.assignment.current.len() {
+						return Err(InsertAdvertisementError::PeerLimitReached)
+					}
+
+					candidates.insert(candidate_hash);
+				} else {
+					if self.version != CollationVersion::V1 {
+						gum::error!(
+							target: LOG_TARGET,
+							"Programming error, `candidate_hash` can not be `None` \
+							for non `V1` networking.",
+						);
+					}
+
+					if state.advertisements.contains_key(&on_relay_parent) {
+						return Err(InsertAdvertisementError::Duplicate)
+					}
+
+					state
+						.advertisements
+						.insert(on_relay_parent, HashSet::from_iter(candidate_hash));
+				};
 
 				state.last_active = Instant::now();
 				Ok((state.collator_id.clone(), state.para_id))
@@ -369,7 +345,6 @@ struct GroupAssignments {
 }
 
 struct PerRelayParent {
-	prospective_parachains_mode: ProspectiveParachainsMode,
 	assignment: GroupAssignments,
 	collations: Collations,
 	v2_receipts: bool,
@@ -390,11 +365,10 @@ struct State {
 	/// ancestry of some active leaf, then it does support prospective parachains.
 	implicit_view: ImplicitView,
 
-	/// All active leaves observed by us, including both that do and do not
-	/// support prospective parachains. This mapping works as a replacement for
+	/// All active leaves observed by us. This mapping works as a replacement for
 	/// [`polkadot_node_network_protocol::View`] and can be dropped once the transition
 	/// to asynchronous backing is done.
-	active_leaves: HashMap<Hash, ProspectiveParachainsMode>,
+	active_leaves: HashMap<Hash, AsyncBackingParams>,
 
 	/// State tracked per relay parent.
 	per_relay_parent: HashMap<Hash, PerRelayParent>,
@@ -437,23 +411,69 @@ struct State {
 	reputation: ReputationAggregator,
 }
 
+impl State {
+	// Returns the number of seconded and pending collations for a specific `ParaId`. Pending
+	// collations are:
+	// 1. Collations being fetched from a collator.
+	// 2. Collations waiting for validation from backing subsystem.
+	// 3. Collations blocked from seconding due to parent not being known by backing subsystem.
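+	// The claim-queue helpers (`ensure_seconding_limit_is_respected` and
+	// `unfulfilled_claim_queue_entries` below) map this sum onto entries of the claim queue
+	// before deciding whether one more advertisement can be accepted.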
+	fn seconded_and_pending_for_para(&self, relay_parent: &Hash, para_id: &ParaId) -> usize {
+		let seconded = self
+			.per_relay_parent
+			.get(relay_parent)
+			.map_or(0, |per_relay_parent| per_relay_parent.collations.seconded_for_para(para_id));
+
+		let pending_fetch = self.per_relay_parent.get(relay_parent).map_or(0, |rp_state| {
+			match rp_state.collations.status {
+				CollationStatus::Fetching(pending_para_id) if pending_para_id == *para_id => 1,
+				_ => 0,
+			}
+		});
+
+		let waiting_for_validation = self
+			.fetched_candidates
+			.keys()
+			.filter(|fc| fc.relay_parent == *relay_parent && fc.para_id == *para_id)
+			.count();
+
+		let blocked_from_seconding =
+			self.blocked_from_seconding.values().fold(0, |acc, blocked_collations| {
+				acc + blocked_collations
+					.iter()
+					.filter(|pc| {
+						pc.candidate_receipt.descriptor.para_id() == *para_id &&
+							pc.candidate_receipt.descriptor.relay_parent() == *relay_parent
+					})
+					.count()
+			});
+
+		gum::trace!(
+			target: LOG_TARGET,
+			?relay_parent,
+			?para_id,
+			seconded,
+			pending_fetch,
+			waiting_for_validation,
+			blocked_from_seconding,
+			"Seconded and pending collations for para",
+		);
+
+		seconded + pending_fetch + waiting_for_validation + blocked_from_seconding
+	}
+}
+
 fn is_relay_parent_in_implicit_view(
 	relay_parent: &Hash,
-	relay_parent_mode: ProspectiveParachainsMode,
 	implicit_view: &ImplicitView,
-	active_leaves: &HashMap<Hash, ProspectiveParachainsMode>,
+	active_leaves: &HashMap<Hash, AsyncBackingParams>,
 	para_id: ParaId,
 ) -> bool {
-	match relay_parent_mode {
-		ProspectiveParachainsMode::Disabled => active_leaves.contains_key(relay_parent),
-		ProspectiveParachainsMode::Enabled { .. } => active_leaves.iter().any(|(hash, mode)| {
-			mode.is_enabled() &&
-				implicit_view
-					.known_allowed_relay_parents_under(hash, Some(para_id))
-					.unwrap_or_default()
-					.contains(relay_parent)
-		}),
-	}
+	active_leaves.iter().any(|(hash, _)| {
+		implicit_view
+			.known_allowed_relay_parents_under(hash, Some(para_id))
+			.unwrap_or_default()
+			.contains(relay_parent)
+	})
 }
 
 async fn construct_per_relay_parent<Sender>(
@@ -461,7 +481,6 @@
 	current_assignments: &mut HashMap<ParaId, usize>,
 	keystore: &KeystorePtr,
 	relay_parent: Hash,
-	relay_parent_mode: ProspectiveParachainsMode,
 	v2_receipts: bool,
 	session_index: SessionIndex,
 ) -> Result<Option<PerRelayParent>>
 where
@@ -479,39 +498,24 @@
 		.await
 		.map_err(Error::CancelledValidatorGroups)??;
 
-	let cores = polkadot_node_subsystem_util::request_availability_cores(relay_parent, sender)
-		.await
-		.await
-		.map_err(Error::CancelledAvailabilityCores)??;
-
 	let core_now = if let Some(group) =
 		polkadot_node_subsystem_util::signing_key_and_index(&validators, keystore).and_then(
 			|(_, index)| polkadot_node_subsystem_util::find_validator_group(&groups, index),
 		) {
-		rotation_info.core_for_group(group, cores.len())
+		rotation_info.core_for_group(group, groups.len())
 	} else {
 		gum::trace!(target: LOG_TARGET, ?relay_parent, "Not a validator");
 		return Ok(None)
 	};
 
-	let claim_queue = request_claim_queue(relay_parent, sender)
+	let mut claim_queue = request_claim_queue(relay_parent, sender)
 		.await
 		.await
 		.map_err(Error::CancelledClaimQueue)??;
 
-	let paras_now = cores
-		.get(core_now.0 as usize)
-		.and_then(|c| match (c, relay_parent_mode) {
-			(CoreState::Occupied(_), ProspectiveParachainsMode::Disabled) => None,
-			(
-				CoreState::Occupied(_),
-				ProspectiveParachainsMode::Enabled { max_candidate_depth: 0, .. },
-			) => None,
-			_ => claim_queue.get(&core_now).cloned(),
-		})
-		.unwrap_or_else(|| VecDeque::new());
-
-	for para_id in paras_now.iter() {
+	let assigned_paras = claim_queue.remove(&core_now).unwrap_or_else(|| VecDeque::new());
+
+	for para_id in assigned_paras.iter() {
 		let entry = current_assignments.entry(*para_id).or_default();
 		*entry += 1;
 		if *entry == 1 {
@@ -524,10 +528,12 @@
 		}
 	}
 
+	let assignment = GroupAssignments { current: assigned_paras.into_iter().collect() };
+	let collations = Collations::new(&assignment.current);
+
 	Ok(Some(PerRelayParent {
-		prospective_parachains_mode: relay_parent_mode,
-		assignment: GroupAssignments { current: paras_now.into_iter().collect() },
-		collations: Collations::default(),
+		assignment,
+		collations,
 		v2_receipts,
 		session_index,
 		current_core: core_now,
@@ -655,12 +661,7 @@ fn handle_peer_view_change(state: &mut State, peer_id: PeerId, view: View) {
 		None => return,
 	};
 
-	peer_data.update_view(
-		&state.implicit_view,
-		&state.active_leaves,
-		&state.per_relay_parent,
-		view,
-	);
+	peer_data.update_view(&state.implicit_view, &state.active_leaves, view);
 	state.collation_requests_cancel_handles.retain(|pc, handle| {
 		let keep = pc.peer_id != peer_id || peer_data.has_advertised(&pc.relay_parent, None);
 		if !keep {
@@ -693,7 +694,6 @@ async fn request_collation(
 		.get_mut(&relay_parent)
 		.ok_or(FetchError::RelayParentOutOfView)?;
 
-	// Relay parent mode is checked in `handle_advertisement`.
 	let (requests, response_recv) = match (peer_protocol_version, prospective_candidate) {
 		(CollationVersion::V1, _) => {
 			let (req, response_recv) = OutgoingRequest::new(
@@ -739,7 +739,7 @@ async fn request_collation(
 	let maybe_candidate_hash =
 		prospective_candidate.as_ref().map(ProspectiveCandidate::candidate_hash);
 
-	per_relay_parent.collations.status = CollationStatus::Fetching;
+	per_relay_parent.collations.status = CollationStatus::Fetching(para_id);
 	per_relay_parent
 		.collations
 		.fetching_from
@@ -1050,6 +1050,62 @@ async fn second_unblocked_collations(
 	}
 }
 
+fn ensure_seconding_limit_is_respected(
+	relay_parent: &Hash,
+	para_id: ParaId,
+	state: &State,
+) -> std::result::Result<(), AdvertisementError> {
+	let paths = state.implicit_view.paths_via_relay_parent(relay_parent);
+
+	gum::trace!(
+		target: LOG_TARGET,
+		?relay_parent,
+		?para_id,
+		?paths,
+		"Checking seconding limit",
+	);
+
+	let mut has_claim_at_some_path = false;
+	for path in paths {
+		let mut cq_state = ClaimQueueState::new();
+		for ancestor in &path {
+			let seconded_and_pending = state.seconded_and_pending_for_para(&ancestor, &para_id);
+			cq_state.add_leaf(
+				&ancestor,
+				&state
+					.per_relay_parent
+					.get(ancestor)
+					.ok_or(AdvertisementError::RelayParentUnknown)?
+					.assignment
+					.current,
+			);
+			for _ in 0..seconded_and_pending {
+				cq_state.claim_at(ancestor, &para_id);
+			}
+		}
+
+		if cq_state.can_claim_at(relay_parent, &para_id) {
+			gum::trace!(
+				target: LOG_TARGET,
+				?relay_parent,
+				?para_id,
+				?path,
+				"Seconding limit respected at path",
+			);
+			has_claim_at_some_path = true;
+			break
+		}
+	}
+
+	// If there is a place in the claim queue for the candidate on at least one path, we will
+	// accept it.
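+	// (For example: with assignments [a, b, a] along a path and two collations for para `a`
+	// already seconded or pending there, a further advertisement for `a` finds no free claim
+	// on that path and is rejected unless some other path still has one.)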
+	if has_claim_at_some_path {
+		Ok(())
+	} else {
+		Err(AdvertisementError::SecondedLimitReached)
+	}
+}
+
 async fn handle_advertisement<Sender>(
 	sender: &mut Sender,
 	state: &mut State,
@@ -1072,7 +1128,6 @@ where
 		.get(&relay_parent)
 		.ok_or(AdvertisementError::RelayParentUnknown)?;
 
-	let relay_parent_mode = per_relay_parent.prospective_parachains_mode;
 	let assignment = &per_relay_parent.assignment;
 
 	let collator_para_id =
@@ -1088,32 +1143,29 @@ where
 	let (collator_id, para_id) = peer_data
 		.insert_advertisement(
 			relay_parent,
-			relay_parent_mode,
 			candidate_hash,
 			&state.implicit_view,
 			&state.active_leaves,
+			&per_relay_parent,
 		)
 		.map_err(AdvertisementError::Invalid)?;
 
-	if per_relay_parent.collations.is_seconded_limit_reached(relay_parent_mode) {
-		return Err(AdvertisementError::SecondedLimitReached)
-	}
+	ensure_seconding_limit_is_respected(&relay_parent, para_id, state)?;
 
 	if let Some((candidate_hash, parent_head_data_hash)) = prospective_candidate {
 		// Check if backing subsystem allows to second this candidate.
 		//
 		// This is also only important when async backing or elastic scaling is enabled.
-		let seconding_not_allowed = relay_parent_mode.is_enabled() &&
-			!can_second(
-				sender,
-				collator_para_id,
-				relay_parent,
-				candidate_hash,
-				parent_head_data_hash,
-			)
-			.await;
+		let can_second = can_second(
+			sender,
+			collator_para_id,
+			relay_parent,
+			candidate_hash,
+			parent_head_data_hash,
+		)
+		.await;
 
-		if seconding_not_allowed {
+		if !can_second {
 			return Err(AdvertisementError::BlockedByBacking)
 		}
 	}
@@ -1143,8 +1195,8 @@ where
 	Ok(())
 }
 
-/// Enqueue collation for fetching. The advertisement is expected to be
-/// validated.
+/// Enqueue collation for fetching. The advertisement is expected to be validated and the seconding
+/// limit checked.
 async fn enqueue_collation<Sender>(
 	sender: &mut Sender,
 	state: &mut State,
@@ -1179,7 +1231,6 @@ where
 			return Ok(())
 		},
 	};
-	let relay_parent_mode = per_relay_parent.prospective_parachains_mode;
 	let prospective_candidate =
 		prospective_candidate.map(|(candidate_hash, parent_head_data_hash)| ProspectiveCandidate {
 			candidate_hash,
@@ -1187,22 +1238,11 @@ where
 		});
 
 	let collations = &mut per_relay_parent.collations;
-	if collations.is_seconded_limit_reached(relay_parent_mode) {
-		gum::trace!(
-			target: LOG_TARGET,
-			peer_id = ?peer_id,
-			%para_id,
-			?relay_parent,
-			"Limit of seconded collations reached for valid advertisement",
-		);
-		return Ok(())
-	}
-
 	let pending_collation =
 		PendingCollation::new(relay_parent, para_id, &peer_id, prospective_candidate);
 
 	match collations.status {
-		CollationStatus::Fetching | CollationStatus::WaitingOnValidation => {
+		CollationStatus::Fetching(_) | CollationStatus::WaitingOnValidation => {
 			gum::trace!(
 				target: LOG_TARGET,
 				peer_id = ?peer_id,
 				%para_id,
 				?relay_parent,
 				"Added collation to the pending list"
 			);
-			collations.waiting_queue.push_back((pending_collation, collator_id));
+			collations.add_to_waiting_queue((pending_collation, collator_id));
 		},
 		CollationStatus::Waiting => {
+			// We were waiting for a collation to be advertised to us (we were idle), so we can
+			// fetch the new collation immediately.
 			fetch_collation(sender, state, pending_collation, collator_id).await?;
 		},
-		CollationStatus::Seconded if relay_parent_mode.is_enabled() => {
-			// Limit is not reached, it's allowed to second another
-			// collation.
- fetch_collation(sender, state, pending_collation, collator_id).await?; - }, - CollationStatus::Seconded => { - gum::trace!( - target: LOG_TARGET, - peer_id = ?peer_id, - %para_id, - ?relay_parent, - ?relay_parent_mode, - "A collation has already been seconded", - ); - }, } Ok(()) @@ -1255,7 +1282,10 @@ where .await .await .map_err(Error::CancelledSessionIndex)??; - let mode = prospective_parachains_mode(sender, *leaf).await?; + + let async_backing_params = + recv_runtime(request_async_backing_params(*leaf, sender).await).await?; + let v2_receipts = request_node_features(*leaf, session_index, sender) .await? .unwrap_or_default() @@ -1268,7 +1298,6 @@ where &mut state.current_assignments, keystore, *leaf, - mode, v2_receipts, session_index, ) @@ -1277,53 +1306,53 @@ where continue }; - state.active_leaves.insert(*leaf, mode); + state.active_leaves.insert(*leaf, async_backing_params); state.per_relay_parent.insert(*leaf, per_relay_parent); - if mode.is_enabled() { - state - .implicit_view - .activate_leaf(sender, *leaf) - .await - .map_err(Error::ImplicitViewFetchError)?; - - // Order is always descending. - let allowed_ancestry = state - .implicit_view - .known_allowed_relay_parents_under(leaf, None) - .unwrap_or_default(); - for block_hash in allowed_ancestry { - if let Entry::Vacant(entry) = state.per_relay_parent.entry(*block_hash) { - // Safe to use the same v2 receipts config for the allowed relay parents as well - // as the same session index since they must be in the same session. - if let Some(per_relay_parent) = construct_per_relay_parent( - sender, - &mut state.current_assignments, - keystore, - *block_hash, - mode, - v2_receipts, - session_index, - ) - .await? - { - entry.insert(per_relay_parent); - } + state + .implicit_view + .activate_leaf(sender, *leaf) + .await + .map_err(Error::ImplicitViewFetchError)?; + + // Order is always descending. + let allowed_ancestry = state + .implicit_view + .known_allowed_relay_parents_under(leaf, None) + .unwrap_or_default(); + for block_hash in allowed_ancestry { + if let Entry::Vacant(entry) = state.per_relay_parent.entry(*block_hash) { + // Safe to use the same v2 receipts config for the allowed relay parents as well + // as the same session index since they must be in the same session. + if let Some(per_relay_parent) = construct_per_relay_parent( + sender, + &mut state.current_assignments, + keystore, + *block_hash, + v2_receipts, + session_index, + ) + .await? + { + entry.insert(per_relay_parent); } } } } - for (removed, mode) in removed { + for (removed, _) in removed { + gum::trace!( + target: LOG_TARGET, + ?view, + ?removed, + "handle_our_view_change - removed", + ); + state.active_leaves.remove(removed); // If the leaf is deactivated it still may stay in the view as a part // of implicit ancestry. Only update the state after the hash is actually // pruned from the block info storage. - let pruned = if mode.is_enabled() { - state.implicit_view.deactivate_leaf(*removed) - } else { - vec![*removed] - }; + let pruned = state.implicit_view.deactivate_leaf(*removed); for removed in pruned { if let Some(per_relay_parent) = state.per_relay_parent.remove(&removed) { @@ -1353,11 +1382,7 @@ where }); for (peer_id, peer_data) in state.peer_data.iter_mut() { - peer_data.prune_old_advertisements( - &state.implicit_view, - &state.active_leaves, - &state.per_relay_parent, - ); + peer_data.prune_old_advertisements(&state.implicit_view, &state.active_leaves); // Disconnect peers who are not relevant to our current or next para. 
 		//
@@ -1462,9 +1487,6 @@ async fn process_msg(
 				"DistributeCollation message is not expected on the validator side of the protocol",
 			);
 		},
-		ReportCollator(id) => {
-			report_collator(&mut state.reputation, ctx.sender(), &state.peer_data, id).await;
-		},
 		NetworkBridgeUpdate(event) => {
 			if let Err(e) = handle_network_msg(ctx, state, keystore, event).await {
 				gum::warn!(
@@ -1493,8 +1515,9 @@ async fn process_msg(
 			if let Some(CollationEvent { collator_id, pending_collation, .. }) =
 				state.fetched_candidates.remove(&fetched_collation)
 			{
-				let PendingCollation { relay_parent, peer_id, prospective_candidate, .. } =
-					pending_collation;
+				let PendingCollation {
+					relay_parent, peer_id, prospective_candidate, para_id, ..
+				} = pending_collation;
 				note_good_collation(
 					&mut state.reputation,
 					ctx.sender(),
@@ -1514,8 +1537,7 @@ async fn process_msg(
 				}
 
 				if let Some(rp_state) = state.per_relay_parent.get_mut(&parent) {
-					rp_state.collations.status = CollationStatus::Seconded;
-					rp_state.collations.note_seconded();
+					rp_state.collations.note_seconded(para_id);
 				}
 
 				// See if we've unblocked other collations for seconding.
@@ -1644,6 +1666,7 @@ async fn run_inner(
 				disconnect_inactive_peers(ctx.sender(), &eviction_policy, &state.peer_data).await;
 			},
 			resp = state.collation_requests.select_next_some() => {
+				let relay_parent = resp.0.pending_collation.relay_parent;
 				let res = match handle_collation_fetch_response(
 					&mut state,
 					resp,
@@ -1652,9 +1675,17 @@ async fn run_inner(
 				).await {
 					Err(Some((peer_id, rep))) => {
 						modify_reputation(&mut state.reputation, ctx.sender(), peer_id, rep).await;
+						// Reset the status for the relay parent
+						state.per_relay_parent.get_mut(&relay_parent).map(|rp| {
+							rp.collations.status.back_to_waiting();
+						});
 						continue
 					},
 					Err(None) => {
+						// Reset the status for the relay parent
+						state.per_relay_parent.get_mut(&relay_parent).map(|rp| {
+							rp.collations.status.back_to_waiting();
+						});
 						continue
 					},
 					Ok(res) => res
@@ -1733,11 +1764,7 @@ async fn dequeue_next_collation_and_fetch(
 	// The collator we tried to fetch from last, optionally which candidate.
 	previous_fetch: (CollatorId, Option<CandidateHash>),
 ) {
-	while let Some((next, id)) = state.per_relay_parent.get_mut(&relay_parent).and_then(|state| {
-		state
-			.collations
-			.get_next_collation_to_fetch(&previous_fetch, state.prospective_parachains_mode)
-	}) {
+	while let Some((next, id)) = get_next_collation_to_fetch(&previous_fetch, relay_parent, state) {
 		gum::debug!(
 			target: LOG_TARGET,
 			?relay_parent,
@@ -1846,9 +1873,7 @@ async fn kick_off_seconding(
 		collation_event.collator_protocol_version,
 		collation_event.pending_collation.prospective_candidate,
 	) {
-		(CollationVersion::V2, Some(ProspectiveCandidate { parent_head_data_hash, .. }))
-			if per_relay_parent.prospective_parachains_mode.is_enabled() =>
-		{
+		(CollationVersion::V2, Some(ProspectiveCandidate { parent_head_data_hash, .. })) => {
 			let pvd = request_prospective_validation_data(
 				ctx.sender(),
 				relay_parent,
@@ -1860,8 +1885,7 @@ async fn kick_off_seconding(
 
 			(pvd, maybe_parent_head_data, Some(parent_head_data_hash))
 		},
-		// Support V2 collators without async backing enabled.
-		(CollationVersion::V2, Some(_)) | (CollationVersion::V1, _) => {
+		(CollationVersion::V1, _) => {
 			let pvd = request_persisted_validation_data(
 				ctx.sender(),
 				candidate_receipt.descriptor().relay_parent(),
@@ -2110,6 +2134,106 @@ async fn handle_collation_fetch_response(
 	result
 }
 
+// Returns the claim queue without fetched or pending advertisements. The resulting `Vec` keeps
+// the order of the claim queue, so the earlier an element is located in the `Vec`, the higher its
+// priority is.
+fn unfulfilled_claim_queue_entries(relay_parent: &Hash, state: &State) -> Result<Vec<ParaId>> {
+	let relay_parent_state = state
+		.per_relay_parent
+		.get(relay_parent)
+		.ok_or(Error::RelayParentStateNotFound)?;
+	let scheduled_paras = relay_parent_state.assignment.current.iter().collect::<HashSet<_>>();
+	let paths = state.implicit_view.paths_via_relay_parent(relay_parent);
+
+	let mut claim_queue_states = Vec::new();
+	for path in paths {
+		let mut cq_state = ClaimQueueState::new();
+		for ancestor in &path {
+			cq_state.add_leaf(
+				&ancestor,
+				&state
+					.per_relay_parent
+					.get(&ancestor)
+					.ok_or(Error::RelayParentStateNotFound)?
+					.assignment
+					.current,
+			);
+
+			for para_id in &scheduled_paras {
+				let seconded_and_pending = state.seconded_and_pending_for_para(&ancestor, &para_id);
+				for _ in 0..seconded_and_pending {
+					cq_state.claim_at(&ancestor, &para_id);
+				}
+			}
+		}
+		claim_queue_states.push(cq_state);
+	}
+
+	// From the claim queue state for each leaf we have to return a single combined one. Go for a
+	// simple solution and return the longest one. In theory we always prefer the earliest entries
+	// in the claim queue, so there is a good chance that the longest path is the one with
+	// unsatisfied entries at the beginning. This is not guaranteed, as we might have fetched the
+	// 2nd or 3rd spot from the claim queue, but it should be good enough.
+	let unfulfilled_entries = claim_queue_states
+		.iter_mut()
+		.map(|cq| cq.unclaimed_at(relay_parent))
+		.max_by(|a, b| a.len().cmp(&b.len()))
+		.unwrap_or_default();
+
+	Ok(unfulfilled_entries)
+}
+
+/// Returns the next collation to fetch from the `waiting_queue` and resets the status back to
+/// `Waiting`.
+fn get_next_collation_to_fetch(
+	finished_one: &(CollatorId, Option<CandidateHash>),
+	relay_parent: Hash,
+	state: &mut State,
+) -> Option<(PendingCollation, CollatorId)> {
+	let unfulfilled_entries = match unfulfilled_claim_queue_entries(&relay_parent, &state) {
+		Ok(entries) => entries,
+		Err(err) => {
+			gum::error!(
+				target: LOG_TARGET,
+				?relay_parent,
+				?err,
+				"Failed to get unfulfilled claim queue entries"
+			);
+			return None
+		},
+	};
+	let rp_state = match state.per_relay_parent.get_mut(&relay_parent) {
+		Some(rp_state) => rp_state,
+		None => {
+			gum::error!(
+				target: LOG_TARGET,
+				?relay_parent,
+				"Failed to get relay parent state"
+			);
+			return None
+		},
+	};
+
+	// If finished one does not match waiting_collation, then we already dequeued another fetch
+	// to replace it.
+	if let Some((collator_id, maybe_candidate_hash)) = rp_state.collations.fetching_from.as_ref() {
+		// If a candidate hash was saved previously, `finished_one` must include this too.
+		if collator_id != &finished_one.0 &&
+			maybe_candidate_hash.map_or(true, |hash| Some(&hash) != finished_one.1.as_ref())
+		{
+			gum::trace!(
+				target: LOG_TARGET,
+				waiting_collation = ?rp_state.collations.fetching_from,
+				?finished_one,
+				"Not proceeding to the next collation - has already been done."
+			);
+			return None
+		}
+	}
+	rp_state.collations.status.back_to_waiting();
+	rp_state.collations.pick_a_collation_to_fetch(unfulfilled_entries)
+}
+
 // Sanity check the candidate descriptor version.
 fn descriptor_version_sanity_check(
 	descriptor: &CandidateDescriptorV2,
diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs
index 7bc61dd4ebec..5a2e135419dd 100644
--- a/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs
+++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs
@@ -28,28 +28,24 @@ use std::{
 	time::Duration,
 };
 
+use self::prospective_parachains::update_view;
 use polkadot_node_network_protocol::{
-	our_view,
 	peer_set::CollationVersion,
 	request_response::{Requests, ResponseSender},
 	ObservedRole,
 };
 use polkadot_node_primitives::{BlockData, PoV};
-use polkadot_node_subsystem::{
-	errors::RuntimeApiError,
-	messages::{AllMessages, ReportPeerMessage, RuntimeApiMessage, RuntimeApiRequest},
+use polkadot_node_subsystem::messages::{
+	AllMessages, ReportPeerMessage, RuntimeApiMessage, RuntimeApiRequest,
 };
 use polkadot_node_subsystem_test_helpers as test_helpers;
 use polkadot_node_subsystem_util::{reputation::add_reputation, TimeoutExt};
 use polkadot_primitives::{
-	node_features,
-	vstaging::{CandidateReceiptV2 as CandidateReceipt, CoreState, OccupiedCore},
-	CollatorPair, CoreIndex, GroupIndex, GroupRotationInfo, HeadData, NodeFeatures,
-	PersistedValidationData, ScheduledCore, ValidatorId, ValidatorIndex,
-};
-use polkadot_primitives_test_helpers::{
-	dummy_candidate_descriptor, dummy_candidate_receipt_bad_sig, dummy_hash,
+	node_features, vstaging::CandidateReceiptV2 as CandidateReceipt, AsyncBackingParams,
+	CollatorPair, CoreIndex, GroupRotationInfo, HeadData, NodeFeatures, PersistedValidationData,
+	ValidatorId, ValidatorIndex,
 };
+use polkadot_primitives_test_helpers::{dummy_candidate_receipt_bad_sig, dummy_hash};
 
 mod prospective_parachains;
 
@@ -57,9 +53,6 @@ const ACTIVITY_TIMEOUT: Duration = Duration::from_millis(500);
 const DECLARE_TIMEOUT: Duration = Duration::from_millis(25);
 const REPUTATION_CHANGE_TEST_INTERVAL: Duration = Duration::from_millis(10);
 
-const ASYNC_BACKING_DISABLED_ERROR: RuntimeApiError =
-	RuntimeApiError::NotSupported { runtime_api_name: "test-runtime" };
-
 fn dummy_pvd() -> PersistedValidationData {
 	PersistedValidationData {
 		parent_head: HeadData(vec![7, 8, 9]),
@@ -77,19 +70,17 @@ struct TestState {
 	validator_public: Vec<ValidatorId>,
 	validator_groups: Vec<Vec<ValidatorIndex>>,
 	group_rotation_info: GroupRotationInfo,
-	cores: Vec<CoreState>,
 	claim_queue: BTreeMap<CoreIndex, VecDeque<ParaId>>,
+	async_backing_params: AsyncBackingParams,
 	node_features: NodeFeatures,
 	session_index: SessionIndex,
+	// Used by `update_view` to keep track of latest requested ancestor
+	last_known_block: Option<u32>,
 }
 
 impl Default for TestState {
 	fn default() -> Self {
-		let chain_a = ParaId::from(1);
-		let chain_b = ParaId::from(2);
-
-		let chain_ids = vec![chain_a, chain_b];
-		let relay_parent = Hash::repeat_byte(0x05);
+		let relay_parent = Hash::from_low_u64_be(0x05);
 
 		let collators = iter::repeat(()).map(|_| CollatorPair::generate().0).take(5).collect();
 
 		let validators = vec![
@@ -110,50 +101,103 @@ impl Default for TestState {
 		let group_rotation_info =
 			GroupRotationInfo { session_start_block: 0, group_rotation_frequency: 1, now: 0 };
 
-		let cores = vec![
-			CoreState::Scheduled(ScheduledCore { para_id: chain_ids[0], collator: None }),
-			CoreState::Free,
-			CoreState::Occupied(OccupiedCore {
-				next_up_on_available: Some(ScheduledCore { para_id: chain_ids[1], collator: None }),
-				occupied_since: 0,
-				time_out_at: 1,
-				next_up_on_time_out: None,
-				availability: Default::default(),
-				group_responsible: GroupIndex(0),
-				candidate_hash: Default::default(),
-				candidate_descriptor: {
-					let mut d = dummy_candidate_descriptor(dummy_hash());
-					d.para_id = chain_ids[1];
-
-					d.into()
-				},
-			}),
-		];
-
 		let mut claim_queue = BTreeMap::new();
-		claim_queue.insert(CoreIndex(0), [chain_ids[0]].into_iter().collect());
+		claim_queue.insert(
+			CoreIndex(0),
+			iter::repeat(ParaId::from(Self::CHAIN_IDS[0]))
+				.take(Self::ASYNC_BACKING_PARAMS.allowed_ancestry_len as usize)
+				.collect(),
+		);
 		claim_queue.insert(CoreIndex(1), VecDeque::new());
-		claim_queue.insert(CoreIndex(2), [chain_ids[1]].into_iter().collect());
+		claim_queue.insert(
+			CoreIndex(2),
+			iter::repeat(ParaId::from(Self::CHAIN_IDS[1]))
+				.take(Self::ASYNC_BACKING_PARAMS.allowed_ancestry_len as usize)
+				.collect(),
+		);
 
 		let mut node_features = NodeFeatures::EMPTY;
 		node_features.resize(node_features::FeatureIndex::CandidateReceiptV2 as usize + 1, false);
 		node_features.set(node_features::FeatureIndex::CandidateReceiptV2 as u8 as usize, true);
 
 		Self {
-			chain_ids,
+			chain_ids: Self::CHAIN_IDS.map(|id| ParaId::from(id)).to_vec(),
 			relay_parent,
 			collators,
 			validator_public,
 			validator_groups,
 			group_rotation_info,
-			cores,
 			claim_queue,
+			async_backing_params: Self::ASYNC_BACKING_PARAMS,
 			node_features,
 			session_index: 1,
+			last_known_block: None,
 		}
 	}
 }
 
+impl TestState {
+	const CHAIN_IDS: [u32; 2] = [1, 2];
+	const ASYNC_BACKING_PARAMS: AsyncBackingParams =
+		AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 };
+
+	fn with_shared_core() -> Self {
+		let mut state = Self::default();
+
+		let mut claim_queue = BTreeMap::new();
+		claim_queue.insert(
+			CoreIndex(0),
+			VecDeque::from_iter(
+				[
+					ParaId::from(Self::CHAIN_IDS[1]),
+					ParaId::from(Self::CHAIN_IDS[0]),
+					ParaId::from(Self::CHAIN_IDS[0]),
+				]
+				.into_iter(),
+			),
+		);
+		state.validator_groups.truncate(1);
+
+		assert!(
+			claim_queue.get(&CoreIndex(0)).unwrap().len() ==
+				Self::ASYNC_BACKING_PARAMS.allowed_ancestry_len as usize
+		);
+
+		state.claim_queue = claim_queue;
+
+		state
+	}
+
+	fn with_one_scheduled_para() -> Self {
+		let mut state = Self::default();
+
+		let validator_groups = vec![vec![ValidatorIndex(0), ValidatorIndex(1)]];
+
+		let mut claim_queue = BTreeMap::new();
+		claim_queue.insert(
+			CoreIndex(0),
+			VecDeque::from_iter(
+				[
+					ParaId::from(Self::CHAIN_IDS[0]),
+					ParaId::from(Self::CHAIN_IDS[0]),
+					ParaId::from(Self::CHAIN_IDS[0]),
+				]
+				.into_iter(),
+			),
+		);
+
+		assert!(
+			claim_queue.get(&CoreIndex(0)).unwrap().len() ==
+				Self::ASYNC_BACKING_PARAMS.allowed_ancestry_len as usize
+		);
+
+		state.validator_groups = validator_groups;
+		state.claim_queue = claim_queue;
+
+		state
+	}
+}
+
 type VirtualOverseer =
 	polkadot_node_subsystem_test_helpers::TestSubsystemContextHandle<CollatorProtocolMessage>;
 
@@ -246,91 +290,6 @@ async fn overseer_signal(overseer: &mut VirtualOverseer, signal: OverseerSignal)
 		.expect(&format!("{:?} is more than enough for sending signals.", TIMEOUT));
 }
 
-async fn respond_to_runtime_api_queries(
-	virtual_overseer: &mut VirtualOverseer,
-	test_state: &TestState,
-	hash: Hash,
-) {
-	assert_matches!(
-		overseer_recv(virtual_overseer).await,
-		AllMessages::RuntimeApi(RuntimeApiMessage::Request(
-			rp,
-			RuntimeApiRequest::SessionIndexForChild(tx)
-		)) => {
-			assert_eq!(rp, hash);
-			tx.send(Ok(test_state.session_index)).unwrap();
-		}
-	);
-
-	assert_matches!(
-		overseer_recv(virtual_overseer).await,
-		AllMessages::RuntimeApi(RuntimeApiMessage::Request(
-			rp,
-			RuntimeApiRequest::AsyncBackingParams(tx)
-		)) => {
-			assert_eq!(rp, hash);
-
tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)).unwrap(); - } - ); - - assert_matches!( - overseer_recv(virtual_overseer).await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - rp, - RuntimeApiRequest::NodeFeatures(_, tx) - )) => { - assert_eq!(rp, hash); - tx.send(Ok(test_state.node_features.clone())).unwrap(); - } - ); - - assert_matches!( - overseer_recv(virtual_overseer).await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _, - RuntimeApiRequest::Validators(tx), - )) => { - let _ = tx.send(Ok(test_state.validator_public.clone())); - } - ); - - assert_matches!( - overseer_recv(virtual_overseer).await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - rp, - RuntimeApiRequest::ValidatorGroups(tx), - )) => { - assert_eq!(rp, hash); - let _ = tx.send(Ok(( - test_state.validator_groups.clone(), - test_state.group_rotation_info.clone(), - ))); - } - ); - - assert_matches!( - overseer_recv(virtual_overseer).await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - rp, - RuntimeApiRequest::AvailabilityCores(tx), - )) => { - assert_eq!(rp, hash); - let _ = tx.send(Ok(test_state.cores.clone())); - } - ); - - assert_matches!( - overseer_recv(virtual_overseer).await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - rp, - RuntimeApiRequest::ClaimQueue(tx), - )) => { - assert_eq!(rp, hash); - let _ = tx.send(Ok(test_state.claim_queue.clone())); - } - ); -} - /// Assert that the next message is a `CandidateBacking(Second())`. async fn assert_candidate_backing_second( virtual_overseer: &mut VirtualOverseer, @@ -506,198 +465,6 @@ async fn advertise_collation( .await; } -// As we receive a relevant advertisement act on it and issue a collation request. -#[test] -fn act_on_advertisement() { - let test_state = TestState::default(); - - test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { - let TestHarness { mut virtual_overseer, .. } = test_harness; - - let pair = CollatorPair::generate().0; - gum::trace!("activating"); - - overseer_send( - &mut virtual_overseer, - CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( - our_view![test_state.relay_parent], - )), - ) - .await; - - respond_to_runtime_api_queries(&mut virtual_overseer, &test_state, test_state.relay_parent) - .await; - - let peer_b = PeerId::random(); - - connect_and_declare_collator( - &mut virtual_overseer, - peer_b, - pair.clone(), - test_state.chain_ids[0], - CollationVersion::V1, - ) - .await; - - advertise_collation(&mut virtual_overseer, peer_b, test_state.relay_parent, None).await; - - assert_fetch_collation_request( - &mut virtual_overseer, - test_state.relay_parent, - test_state.chain_ids[0], - None, - ) - .await; - - virtual_overseer - }); -} - -/// Tests that validator side works with v2 network protocol -/// before async backing is enabled. -#[test] -fn act_on_advertisement_v2() { - let test_state = TestState::default(); - - test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { - let TestHarness { mut virtual_overseer, .. 
} = test_harness; - - let pair = CollatorPair::generate().0; - gum::trace!("activating"); - - overseer_send( - &mut virtual_overseer, - CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( - our_view![test_state.relay_parent], - )), - ) - .await; - - respond_to_runtime_api_queries(&mut virtual_overseer, &test_state, test_state.relay_parent) - .await; - - let peer_b = PeerId::random(); - - connect_and_declare_collator( - &mut virtual_overseer, - peer_b, - pair.clone(), - test_state.chain_ids[0], - CollationVersion::V2, - ) - .await; - - let pov = PoV { block_data: BlockData(vec![]) }; - let mut candidate_a = - dummy_candidate_receipt_bad_sig(dummy_hash(), Some(Default::default())); - candidate_a.descriptor.para_id = test_state.chain_ids[0]; - candidate_a.descriptor.relay_parent = test_state.relay_parent; - candidate_a.descriptor.persisted_validation_data_hash = dummy_pvd().hash(); - - let candidate_hash = candidate_a.hash(); - let parent_head_data_hash = Hash::zero(); - // v2 advertisement. - advertise_collation( - &mut virtual_overseer, - peer_b, - test_state.relay_parent, - Some((candidate_hash, parent_head_data_hash)), - ) - .await; - - let response_channel = assert_fetch_collation_request( - &mut virtual_overseer, - test_state.relay_parent, - test_state.chain_ids[0], - Some(candidate_hash), - ) - .await; - - response_channel - .send(Ok(( - request_v1::CollationFetchingResponse::Collation( - candidate_a.clone().into(), - pov.clone(), - ) - .encode(), - ProtocolName::from(""), - ))) - .expect("Sending response should succeed"); - - assert_candidate_backing_second( - &mut virtual_overseer, - test_state.relay_parent, - test_state.chain_ids[0], - &pov, - // Async backing isn't enabled and thus it should do it the old way. - CollationVersion::V1, - ) - .await; - - virtual_overseer - }); -} - -// Test that other subsystems may modify collators' reputations. -#[test] -fn collator_reporting_works() { - let test_state = TestState::default(); - - test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { - let TestHarness { mut virtual_overseer, .. } = test_harness; - - overseer_send( - &mut virtual_overseer, - CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( - our_view![test_state.relay_parent], - )), - ) - .await; - - respond_to_runtime_api_queries(&mut virtual_overseer, &test_state, test_state.relay_parent) - .await; - - let peer_b = PeerId::random(); - let peer_c = PeerId::random(); - - connect_and_declare_collator( - &mut virtual_overseer, - peer_b, - test_state.collators[0].clone(), - test_state.chain_ids[0], - CollationVersion::V1, - ) - .await; - - connect_and_declare_collator( - &mut virtual_overseer, - peer_c, - test_state.collators[1].clone(), - test_state.chain_ids[0], - CollationVersion::V1, - ) - .await; - - overseer_send( - &mut virtual_overseer, - CollatorProtocolMessage::ReportCollator(test_state.collators[0].public()), - ) - .await; - - assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::NetworkBridgeTx( - NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(peer, rep)), - ) => { - assert_eq!(peer, peer_b); - assert_eq!(rep.value, COST_REPORT_BAD.cost_or_benefit()); - } - ); - - virtual_overseer - }); -} - // Test that we verify the signatures on `Declare` and `AdvertiseCollation` messages. 
#[test] fn collator_authentication_verification_works() { @@ -747,31 +514,18 @@ fn collator_authentication_verification_works() { }); } -/// Tests that a validator fetches only one collation at any moment of time -/// per relay parent and ignores other advertisements once a candidate gets -/// seconded. +/// Tests that on a V1 Advertisement a validator fetches only one collation at any moment of time +/// per relay parent and ignores other V1 advertisements once a candidate gets seconded. #[test] -fn fetch_one_collation_at_a_time() { - let test_state = TestState::default(); +fn fetch_one_collation_at_a_time_for_v1_advertisement() { + let mut test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, .. } = test_harness; - let second = Hash::random(); - - let our_view = our_view![test_state.relay_parent, second]; - - overseer_send( - &mut virtual_overseer, - CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( - our_view.clone(), - )), - ) - .await; - - // Iter over view since the order may change due to sorted invariant. - for hash in our_view.iter() { - respond_to_runtime_api_queries(&mut virtual_overseer, &test_state, *hash).await; - } + let second = Hash::from_low_u64_be(test_state.relay_parent.to_low_u64_be() - 1); + let relay_parent = test_state.relay_parent; + update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent, 0), (second, 1)]) + .await; let peer_b = PeerId::random(); let peer_c = PeerId::random(); @@ -794,8 +548,8 @@ fn fetch_one_collation_at_a_time() { ) .await; - advertise_collation(&mut virtual_overseer, peer_b, test_state.relay_parent, None).await; - advertise_collation(&mut virtual_overseer, peer_c, test_state.relay_parent, None).await; + advertise_collation(&mut virtual_overseer, peer_b, relay_parent, None).await; + advertise_collation(&mut virtual_overseer, peer_c, relay_parent, None).await; let response_channel = assert_fetch_collation_request( &mut virtual_overseer, @@ -850,26 +604,14 @@ fn fetch_one_collation_at_a_time() { /// timeout and in case of an error. #[test] fn fetches_next_collation() { - let test_state = TestState::default(); + let mut test_state = TestState::with_one_scheduled_para(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, .. } = test_harness; + let first = test_state.relay_parent; let second = Hash::random(); - - let our_view = our_view![test_state.relay_parent, second]; - - overseer_send( - &mut virtual_overseer, - CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( - our_view.clone(), - )), - ) - .await; - - for hash in our_view.iter() { - respond_to_runtime_api_queries(&mut virtual_overseer, &test_state, *hash).await; - } + update_view(&mut virtual_overseer, &mut test_state, vec![(first, 0), (second, 1)]).await; let peer_b = PeerId::random(); let peer_c = PeerId::random(); @@ -979,21 +721,13 @@ fn fetches_next_collation() { #[test] fn reject_connection_to_next_group() { - let test_state = TestState::default(); + let mut test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, .. 
} = test_harness; - overseer_send( - &mut virtual_overseer, - CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( - our_view![test_state.relay_parent], - )), - ) - .await; - - respond_to_runtime_api_queries(&mut virtual_overseer, &test_state, test_state.relay_parent) - .await; + let relay_parent = test_state.relay_parent; + update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent, 0)]).await; let peer_b = PeerId::random(); @@ -1026,26 +760,13 @@ fn reject_connection_to_next_group() { // invalid. #[test] fn fetch_next_collation_on_invalid_collation() { - let test_state = TestState::default(); + let mut test_state = TestState::with_one_scheduled_para(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, .. } = test_harness; - let second = Hash::random(); - - let our_view = our_view![test_state.relay_parent, second]; - - overseer_send( - &mut virtual_overseer, - CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( - our_view.clone(), - )), - ) - .await; - - for hash in our_view.iter() { - respond_to_runtime_api_queries(&mut virtual_overseer, &test_state, *hash).await; - } + let relay_parent = test_state.relay_parent; + update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent, 0)]).await; let peer_b = PeerId::random(); let peer_c = PeerId::random(); @@ -1068,12 +789,12 @@ fn fetch_next_collation_on_invalid_collation() { ) .await; - advertise_collation(&mut virtual_overseer, peer_b, test_state.relay_parent, None).await; - advertise_collation(&mut virtual_overseer, peer_c, test_state.relay_parent, None).await; + advertise_collation(&mut virtual_overseer, peer_b, relay_parent, None).await; + advertise_collation(&mut virtual_overseer, peer_c, relay_parent, None).await; let response_channel = assert_fetch_collation_request( &mut virtual_overseer, - test_state.relay_parent, + relay_parent, test_state.chain_ids[0], None, ) @@ -1083,7 +804,7 @@ fn fetch_next_collation_on_invalid_collation() { let mut candidate_a = dummy_candidate_receipt_bad_sig(dummy_hash(), Some(Default::default())); candidate_a.descriptor.para_id = test_state.chain_ids[0]; - candidate_a.descriptor.relay_parent = test_state.relay_parent; + candidate_a.descriptor.relay_parent = relay_parent; candidate_a.descriptor.persisted_validation_data_hash = dummy_pvd().hash(); response_channel .send(Ok(( @@ -1098,7 +819,7 @@ fn fetch_next_collation_on_invalid_collation() { let receipt = assert_candidate_backing_second( &mut virtual_overseer, - test_state.relay_parent, + relay_parent, test_state.chain_ids[0], &pov, CollationVersion::V1, @@ -1108,7 +829,7 @@ fn fetch_next_collation_on_invalid_collation() { // Inform that the candidate was invalid. overseer_send( &mut virtual_overseer, - CollatorProtocolMessage::Invalid(test_state.relay_parent, receipt), + CollatorProtocolMessage::Invalid(relay_parent, receipt), ) .await; @@ -1125,7 +846,7 @@ fn fetch_next_collation_on_invalid_collation() { // We should see a request for another collation. assert_fetch_collation_request( &mut virtual_overseer, - test_state.relay_parent, + relay_parent, test_state.chain_ids[0], None, ) @@ -1137,25 +858,15 @@ fn fetch_next_collation_on_invalid_collation() { #[test] fn inactive_disconnected() { - let test_state = TestState::default(); + let mut test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, .. 
} = test_harness; let pair = CollatorPair::generate().0; - let hash_a = test_state.relay_parent; - - overseer_send( - &mut virtual_overseer, - CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( - our_view![hash_a], - )), - ) - .await; - - respond_to_runtime_api_queries(&mut virtual_overseer, &test_state, test_state.relay_parent) - .await; + let relay_parent = test_state.relay_parent; + update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent, 0)]).await; let peer_b = PeerId::random(); @@ -1167,11 +878,11 @@ fn inactive_disconnected() { CollationVersion::V1, ) .await; - advertise_collation(&mut virtual_overseer, peer_b, test_state.relay_parent, None).await; + advertise_collation(&mut virtual_overseer, peer_b, relay_parent, None).await; assert_fetch_collation_request( &mut virtual_overseer, - test_state.relay_parent, + relay_parent, test_state.chain_ids[0], None, ) @@ -1186,31 +897,24 @@ fn inactive_disconnected() { #[test] fn activity_extends_life() { - let test_state = TestState::default(); + let mut test_state = TestState::with_one_scheduled_para(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, .. } = test_harness; let pair = CollatorPair::generate().0; - let hash_a = test_state.relay_parent; - let hash_b = Hash::repeat_byte(1); - let hash_c = Hash::repeat_byte(2); - - let our_view = our_view![hash_a, hash_b, hash_c]; + let hash_a = Hash::from_low_u64_be(12); + let hash_b = Hash::from_low_u64_be(11); + let hash_c = Hash::from_low_u64_be(10); - overseer_send( + update_view( &mut virtual_overseer, - CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( - our_view.clone(), - )), + &mut test_state, + vec![(hash_a, 0), (hash_b, 1), (hash_c, 2)], ) .await; - for hash in our_view.iter() { - respond_to_runtime_api_queries(&mut virtual_overseer, &test_state, *hash).await; - } - let peer_b = PeerId::random(); connect_and_declare_collator( @@ -1268,21 +972,13 @@ fn activity_extends_life() { #[test] fn disconnect_if_no_declare() { - let test_state = TestState::default(); + let mut test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, .. } = test_harness; - overseer_send( - &mut virtual_overseer, - CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( - our_view![test_state.relay_parent], - )), - ) - .await; - - respond_to_runtime_api_queries(&mut virtual_overseer, &test_state, test_state.relay_parent) - .await; + let relay_parent = test_state.relay_parent; + update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent, 0)]).await; let peer_b = PeerId::random(); @@ -1305,26 +1001,16 @@ fn disconnect_if_no_declare() { #[test] fn disconnect_if_wrong_declare() { - let test_state = TestState::default(); + let mut test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, .. 
} = test_harness; - let pair = CollatorPair::generate().0; - - overseer_send( - &mut virtual_overseer, - CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( - our_view![test_state.relay_parent], - )), - ) - .await; - - respond_to_runtime_api_queries(&mut virtual_overseer, &test_state, test_state.relay_parent) - .await; - let peer_b = PeerId::random(); + let relay_parent = test_state.relay_parent; + update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent, 0)]).await; + overseer_send( &mut virtual_overseer, CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerConnected( @@ -1367,26 +1053,16 @@ fn disconnect_if_wrong_declare() { #[test] fn delay_reputation_change() { - let test_state = TestState::default(); + let mut test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| false), |test_harness| async move { let TestHarness { mut virtual_overseer, .. } = test_harness; - let pair = CollatorPair::generate().0; - - overseer_send( - &mut virtual_overseer, - CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( - our_view![test_state.relay_parent], - )), - ) - .await; - - respond_to_runtime_api_queries(&mut virtual_overseer, &test_state, test_state.relay_parent) - .await; - let peer_b = PeerId::random(); + let relay_parent = test_state.relay_parent; + update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent, 0)]).await; + overseer_send( &mut virtual_overseer, CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerConnected( @@ -1460,42 +1136,24 @@ fn view_change_clears_old_collators() { let pair = CollatorPair::generate().0; - overseer_send( - &mut virtual_overseer, - CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( - our_view![test_state.relay_parent], - )), - ) - .await; - - respond_to_runtime_api_queries(&mut virtual_overseer, &test_state, test_state.relay_parent) - .await; - - let peer_b = PeerId::random(); + let peer = PeerId::random(); + let relay_parent = test_state.relay_parent; + update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent, 0)]).await; connect_and_declare_collator( &mut virtual_overseer, - peer_b, + peer, pair.clone(), test_state.chain_ids[0], CollationVersion::V1, ) .await; - let hash_b = Hash::repeat_byte(69); - - overseer_send( - &mut virtual_overseer, - CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( - our_view![hash_b], - )), - ) - .await; - test_state.group_rotation_info = test_state.group_rotation_info.bump_rotation(); - respond_to_runtime_api_queries(&mut virtual_overseer, &test_state, hash_b).await; - assert_collator_disconnect(&mut virtual_overseer, peer_b).await; + update_view(&mut virtual_overseer, &mut test_state, vec![]).await; + + assert_collator_disconnect(&mut virtual_overseer, peer).await; virtual_overseer }) diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs index eda26e8539a1..fac63aeb2097 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs @@ -21,14 +21,11 @@ use super::*; use polkadot_node_subsystem::messages::ChainApiMessage; use polkadot_primitives::{ vstaging::{CommittedCandidateReceiptV2 as CommittedCandidateReceipt, MutateDescriptorV2}, - 
AsyncBackingParams, BlockNumber, CandidateCommitments, Header, SigningContext, ValidatorId,
+	BlockNumber, CandidateCommitments, Header, SigningContext, ValidatorId,
 };
 use polkadot_primitives_test_helpers::dummy_committed_candidate_receipt_v2;
 use rstest::rstest;
 
-const ASYNC_BACKING_PARAMETERS: AsyncBackingParams =
-	AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 };
-
 fn get_parent_hash(hash: Hash) -> Hash {
 	Hash::from_low_u64_be(hash.to_low_u64_be() + 1)
 }
@@ -48,7 +45,8 @@ async fn assert_construct_per_relay_parent(
 			msg,
 			AllMessages::RuntimeApi(
 				RuntimeApiMessage::Request(parent, RuntimeApiRequest::Validators(tx))
-			) if parent == hash => {
+			) => {
+				assert_eq!(parent, hash);
 				tx.send(Ok(test_state.validator_public.clone())).unwrap();
 			}
 		);
@@ -65,15 +63,6 @@ async fn assert_construct_per_relay_parent(
 		}
 	);
 
-	assert_matches!(
-		overseer_recv(virtual_overseer).await,
-		AllMessages::RuntimeApi(
-			RuntimeApiMessage::Request(parent, RuntimeApiRequest::AvailabilityCores(tx))
-		) if parent == hash => {
-			tx.send(Ok(test_state.cores.clone())).unwrap();
-		}
-	);
-
 	assert_matches!(
 		overseer_recv(virtual_overseer).await,
 		AllMessages::RuntimeApi(RuntimeApiMessage::Request(
@@ -88,12 +77,11 @@ async fn assert_construct_per_relay_parent(
 /// Handle a view update.
 pub(super) async fn update_view(
 	virtual_overseer: &mut VirtualOverseer,
-	test_state: &TestState,
+	test_state: &mut TestState,
 	new_view: Vec<(Hash, u32)>, // Hash and block number.
-	activated: u8, // How many new heads does this update contain?
 ) -> Option<AllMessages> {
+	let last_block_from_view = new_view.last().map(|t| t.1);
 	let new_view: HashMap<Hash, u32> = HashMap::from_iter(new_view);
-
 	let our_view = OurView::new(new_view.keys().map(|hash| *hash), 0);
 
 	overseer_send(
@@ -103,9 +91,14 @@ pub(super) async fn update_view(
 	.await;
 
 	let mut next_overseer_message = None;
-	for _ in 0..activated {
+	for _ in 0..new_view.len() {
+		let msg = match next_overseer_message.take() {
+			Some(msg) => msg,
+			None => overseer_recv(virtual_overseer).await,
+		};
+
 		let (leaf_hash, leaf_number) = assert_matches!(
-			overseer_recv(virtual_overseer).await,
+			msg,
 			AllMessages::RuntimeApi(RuntimeApiMessage::Request(
 				parent,
 				RuntimeApiRequest::SessionIndexForChild(tx)
@@ -121,7 +114,7 @@ pub(super) async fn update_view(
 				_,
 				RuntimeApiRequest::AsyncBackingParams(tx),
 			)) => {
-				tx.send(Ok(ASYNC_BACKING_PARAMETERS)).unwrap();
+				tx.send(Ok(test_state.async_backing_params)).unwrap();
 			}
 		);
 
@@ -144,7 +137,8 @@ pub(super) async fn update_view(
 		)
 		.await;
 
-		let min_number = leaf_number.saturating_sub(ASYNC_BACKING_PARAMETERS.allowed_ancestry_len);
+		let min_number =
+			leaf_number.saturating_sub(test_state.async_backing_params.allowed_ancestry_len);
 		let ancestry_len = leaf_number + 1 - min_number;
 		let ancestry_hashes = std::iter::successors(Some(leaf_hash), |h| Some(get_parent_hash(*h)))
@@ -157,6 +151,10 @@ pub(super) async fn update_view(
 		{
 			let mut ancestry_iter = ancestry_iter.clone();
 			while let Some((hash, number)) = ancestry_iter.next() {
+				if Some(number) == test_state.last_known_block {
+					break;
+				}
+
 				// May be `None` for the last element.
 				let parent_hash =
 					ancestry_iter.peek().map(|(h, _)| *h).unwrap_or_else(|| get_parent_hash(hash));
@@ -204,6 +202,9 @@ pub(super) async fn update_view(
 			// Skip the leaf.
for (hash, number) in ancestry_iter.skip(1).take(requested_len.saturating_sub(1)) { + if Some(number) == test_state.last_known_block { + break; + } assert_construct_per_relay_parent( virtual_overseer, test_state, @@ -214,6 +215,9 @@ pub(super) async fn update_view( .await; } } + + test_state.last_known_block = last_block_from_view; + next_overseer_message } @@ -337,9 +341,140 @@ async fn assert_persisted_validation_data( } } +// Combines dummy candidate creation, advertisement and fetching in a single call +async fn submit_second_and_assert( + virtual_overseer: &mut VirtualOverseer, + keystore: KeystorePtr, + para_id: ParaId, + relay_parent: Hash, + collator: PeerId, + candidate_head_data: HeadData, +) { + let (candidate, commitments) = + create_dummy_candidate_and_commitments(para_id, candidate_head_data, relay_parent); + + let candidate_hash = candidate.hash(); + let parent_head_data_hash = Hash::zero(); + + assert_advertise_collation( + virtual_overseer, + collator, + relay_parent, + para_id, + (candidate_hash, parent_head_data_hash), + ) + .await; + + let response_channel = assert_fetch_collation_request( + virtual_overseer, + relay_parent, + para_id, + Some(candidate_hash), + ) + .await; + + let pov = PoV { block_data: BlockData(vec![1]) }; + + send_collation_and_assert_processing( + virtual_overseer, + keystore, + relay_parent, + para_id, + collator, + response_channel, + candidate, + commitments, + pov, + ) + .await; +} + +fn create_dummy_candidate_and_commitments( + para_id: ParaId, + candidate_head_data: HeadData, + relay_parent: Hash, +) -> (CandidateReceipt, CandidateCommitments) { + let mut candidate = dummy_candidate_receipt_bad_sig(relay_parent, Some(Default::default())); + candidate.descriptor.para_id = para_id; + candidate.descriptor.persisted_validation_data_hash = dummy_pvd().hash(); + let commitments = CandidateCommitments { + head_data: candidate_head_data, + horizontal_messages: Default::default(), + upward_messages: Default::default(), + new_validation_code: None, + processed_downward_messages: 0, + hrmp_watermark: 0, + }; + candidate.commitments_hash = commitments.hash(); + + (candidate.into(), commitments) +} + +async fn assert_advertise_collation( + virtual_overseer: &mut VirtualOverseer, + peer: PeerId, + relay_parent: Hash, + expected_para_id: ParaId, + candidate: (CandidateHash, Hash), +) { + advertise_collation(virtual_overseer, peer, relay_parent, Some(candidate)).await; + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::CandidateBacking( + CandidateBackingMessage::CanSecond(request, tx), + ) => { + assert_eq!(request.candidate_hash, candidate.0); + assert_eq!(request.candidate_para_id, expected_para_id); + assert_eq!(request.parent_head_data_hash, candidate.1); + tx.send(true).expect("receiving side should be alive"); + } + ); +} + +async fn send_collation_and_assert_processing( + virtual_overseer: &mut VirtualOverseer, + keystore: KeystorePtr, + relay_parent: Hash, + expected_para_id: ParaId, + expected_peer_id: PeerId, + response_channel: ResponseSender, + candidate: CandidateReceipt, + commitments: CandidateCommitments, + pov: PoV, +) { + response_channel + .send(Ok(( + request_v2::CollationFetchingResponse::Collation(candidate.clone(), pov.clone()) + .encode(), + ProtocolName::from(""), + ))) + .expect("Sending response should succeed"); + + assert_candidate_backing_second( + virtual_overseer, + relay_parent, + expected_para_id, + &pov, + CollationVersion::V2, + ) + .await; + + let candidate = CommittedCandidateReceipt { 
descriptor: candidate.descriptor, commitments }; + + send_seconded_statement(virtual_overseer, keystore.clone(), &candidate).await; + + assert_collation_seconded( + virtual_overseer, + relay_parent, + expected_peer_id, + CollationVersion::V2, + ) + .await; +} + #[test] fn v1_advertisement_accepted_and_seconded() { - let test_state = TestState::default(); + let mut test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, keystore } = test_harness; @@ -349,7 +484,7 @@ fn v1_advertisement_accepted_and_seconded() { let head_b = Hash::from_low_u64_be(128); let head_b_num: u32 = 0; - update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; + update_view(&mut virtual_overseer, &mut test_state, vec![(head_b, head_b_num)]).await; let peer_a = PeerId::random(); @@ -377,7 +512,7 @@ fn v1_advertisement_accepted_and_seconded() { candidate.descriptor.para_id = test_state.chain_ids[0]; candidate.descriptor.persisted_validation_data_hash = dummy_pvd().hash(); let commitments = CandidateCommitments { - head_data: HeadData(vec![1 as u8]), + head_data: HeadData(vec![1u8]), horizontal_messages: Default::default(), upward_messages: Default::default(), new_validation_code: None, @@ -418,7 +553,7 @@ fn v1_advertisement_accepted_and_seconded() { #[test] fn v1_advertisement_rejected_on_non_active_leaf() { - let test_state = TestState::default(); + let mut test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, .. } = test_harness; @@ -428,7 +563,7 @@ fn v1_advertisement_rejected_on_non_active_leaf() { let head_b = Hash::from_low_u64_be(128); let head_b_num: u32 = 5; - update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; + update_view(&mut virtual_overseer, &mut test_state, vec![(head_b, head_b_num)]).await; let peer_a = PeerId::random(); @@ -460,7 +595,7 @@ fn v1_advertisement_rejected_on_non_active_leaf() { #[test] fn accept_advertisements_from_implicit_view() { - let test_state = TestState::default(); + let mut test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, .. } = test_harness; @@ -478,7 +613,7 @@ fn accept_advertisements_from_implicit_view() { let head_d = get_parent_hash(head_c); // Activated leaf is `b`, but the collation will be based on `c`. - update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; + update_view(&mut virtual_overseer, &mut test_state, vec![(head_b, head_b_num)]).await; let peer_a = PeerId::random(); let peer_b = PeerId::random(); @@ -563,24 +698,26 @@ fn accept_advertisements_from_implicit_view() { #[test] fn second_multiple_candidates_per_relay_parent() { - let test_state = TestState::default(); + let mut test_state = TestState::with_one_scheduled_para(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, keystore } = test_harness; let pair = CollatorPair::generate().0; - // Grandparent of head `a`. + let head_a = Hash::from_low_u64_be(130); + let head_a_num: u32 = 0; + let head_b = Hash::from_low_u64_be(128); let head_b_num: u32 = 2; - // Grandparent of head `b`. - // Group rotation frequency is 1 by default, at `c` we're assigned - // to the first para. 
-		let head_c = Hash::from_low_u64_be(130);
-
-		// Activated leaf is `b`, but the collation will be based on `c`.
-		update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await;
+		// Activated leaves are `a` and `b`. The collation will be based on `b`.
+		update_view(
+			&mut virtual_overseer,
+			&mut test_state,
+			vec![(head_a, head_a_num), (head_b, head_b_num)],
+		)
+		.await;
 
 		let peer_a = PeerId::random();
 
@@ -593,80 +730,17 @@
 		)
 		.await;
 
-		for i in 0..(ASYNC_BACKING_PARAMETERS.max_candidate_depth + 1) {
-			let mut candidate = dummy_candidate_receipt_bad_sig(head_c, Some(Default::default()));
-			candidate.descriptor.para_id = test_state.chain_ids[0];
-			candidate.descriptor.persisted_validation_data_hash = dummy_pvd().hash();
-			let commitments = CandidateCommitments {
-				head_data: HeadData(vec![i as u8]),
-				horizontal_messages: Default::default(),
-				upward_messages: Default::default(),
-				new_validation_code: None,
-				processed_downward_messages: 0,
-				hrmp_watermark: 0,
-			};
-			candidate.commitments_hash = commitments.hash();
-			let candidate: CandidateReceipt = candidate.into();
-
-			let candidate_hash = candidate.hash();
-			let parent_head_data_hash = Hash::zero();
-
-			advertise_collation(
-				&mut virtual_overseer,
-				peer_a,
-				head_c,
-				Some((candidate_hash, parent_head_data_hash)),
-			)
-			.await;
-			assert_matches!(
-				overseer_recv(&mut virtual_overseer).await,
-				AllMessages::CandidateBacking(
-					CandidateBackingMessage::CanSecond(request, tx),
-				) => {
-					assert_eq!(request.candidate_hash, candidate_hash);
-					assert_eq!(request.candidate_para_id, test_state.chain_ids[0]);
-					assert_eq!(request.parent_head_data_hash, parent_head_data_hash);
-					tx.send(true).expect("receiving side should be alive");
-				}
-			);
-
-			let response_channel = assert_fetch_collation_request(
-				&mut virtual_overseer,
-				head_c,
-				test_state.chain_ids[0],
-				Some(candidate_hash),
-			)
-			.await;
-
-			let pov = PoV { block_data: BlockData(vec![1]) };
-
-			response_channel
-				.send(Ok((
-					request_v2::CollationFetchingResponse::Collation(
-						candidate.clone(),
-						pov.clone(),
-					)
-					.encode(),
-					ProtocolName::from(""),
-				)))
-				.expect("Sending response should succeed");
-
-			assert_candidate_backing_second(
+		// `allowed_ancestry_len` equals the size of the claim queue
+		for i in 0..test_state.async_backing_params.allowed_ancestry_len {
+			submit_second_and_assert(
 				&mut virtual_overseer,
-				head_c,
+				keystore.clone(),
 				test_state.chain_ids[0],
-				&pov,
-				CollationVersion::V2,
+				head_a,
+				peer_a,
+				HeadData(vec![i as u8]),
 			)
 			.await;
-
-			let candidate =
-				CommittedCandidateReceipt { descriptor: candidate.descriptor, commitments };
-
-			send_seconded_statement(&mut virtual_overseer, keystore.clone(), &candidate).await;
-
-			assert_collation_seconded(&mut virtual_overseer, head_c, peer_a, CollationVersion::V2)
-				.await;
 		}
 
 		// No more advertisements can be made for this relay parent.
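// [Editor's note — illustration only, not part of the patch] Under the
// assumption (per `TestState::with_one_scheduled_para`) that every claim-queue
// slot at this relay parent is assigned to `chain_ids[0]`, the acceptance
// limit exercised above equals the para's number of claim-queue entries
// (here equal to `allowed_ancestry_len`) rather than the old
// `max_candidate_depth + 1` bound:
//
//     let limit = test_state
//         .claim_queue
//         .values()
//         .flatten()
//         .filter(|para| **para == test_state.chain_ids[0])
//         .count() as u32;
//     assert_eq!(limit, test_state.async_backing_params.allowed_ancestry_len);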
@@ -674,21 +748,14 @@
 		advertise_collation(
 			&mut virtual_overseer,
 			peer_a,
-			head_c,
+			head_a,
 			Some((candidate_hash, Hash::zero())),
 		)
 		.await;
-		// Reported because reached the limit of advertisements per relay parent.
-		assert_matches!(
-			overseer_recv(&mut virtual_overseer).await,
-			AllMessages::NetworkBridgeTx(
-				NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(peer_id, rep)),
-			) => {
-				assert_eq!(peer_a, peer_id);
-				assert_eq!(rep.value, COST_UNEXPECTED_MESSAGE.cost_or_benefit());
-			}
-		);
+		// Rejected but not reported because the limit of advertisements for the para_id was
+		// reached
+		test_helpers::Yield::new().await;
+		assert_matches!(virtual_overseer.recv().now_or_never(), None);
 
 		// By different peer too (not reported).
 		let pair_b = CollatorPair::generate().0;
@@ -707,7 +774,7 @@
 		advertise_collation(
 			&mut virtual_overseer,
 			peer_b,
-			head_c,
+			head_a,
 			Some((candidate_hash, Hash::zero())),
 		)
 		.await;
@@ -721,7 +788,7 @@
 
 #[test]
 fn fetched_collation_sanity_check() {
-	let test_state = TestState::default();
+	let mut test_state = TestState::default();
 
 	test_harness(ReputationAggregator::new(|_| true), |test_harness| async move {
 		let TestHarness { mut virtual_overseer, .. } = test_harness;
@@ -738,7 +805,7 @@
 		let head_c = Hash::from_low_u64_be(130);
 
 		// Activated leaf is `b`, but the collation will be based on `c`.
- update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; + update_view(&mut virtual_overseer, &mut test_state, vec![(head_b, head_b_num)]).await; let peer_a = PeerId::random(); @@ -1344,7 +1411,7 @@ fn v2_descriptor(#[case] v2_feature_enabled: bool) { let head_b = Hash::from_low_u64_be(128); let head_b_num: u32 = 0; - update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; + update_view(&mut virtual_overseer, &mut test_state, vec![(head_b, head_b_num)]).await; let peer_a = PeerId::random(); @@ -1442,7 +1509,7 @@ fn v2_descriptor(#[case] v2_feature_enabled: bool) { #[test] fn invalid_v2_descriptor() { - let test_state = TestState::default(); + let mut test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, .. } = test_harness; @@ -1452,7 +1519,7 @@ fn invalid_v2_descriptor() { let head_b = Hash::from_low_u64_be(128); let head_b_num: u32 = 0; - update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; + update_view(&mut virtual_overseer, &mut test_state, vec![(head_b, head_b_num)]).await; let peer_a = PeerId::random(); @@ -1545,3 +1612,868 @@ fn invalid_v2_descriptor() { virtual_overseer }); } + +#[test] +fn fair_collation_fetches() { + let mut test_state = TestState::with_shared_core(); + + test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { + let TestHarness { mut virtual_overseer, keystore } = test_harness; + + let head_b = Hash::from_low_u64_be(128); + let head_b_num: u32 = 2; + + update_view(&mut virtual_overseer, &mut test_state, vec![(head_b, head_b_num)]).await; + + let peer_a = PeerId::random(); + let pair_a = CollatorPair::generate().0; + + connect_and_declare_collator( + &mut virtual_overseer, + peer_a, + pair_a.clone(), + test_state.chain_ids[0], + CollationVersion::V2, + ) + .await; + + let peer_b = PeerId::random(); + let pair_b = CollatorPair::generate().0; + + connect_and_declare_collator( + &mut virtual_overseer, + peer_b, + pair_b.clone(), + test_state.chain_ids[1], + CollationVersion::V2, + ) + .await; + + // `peer_a` sends two advertisements (its claim queue limit) + for i in 0..2u8 { + submit_second_and_assert( + &mut virtual_overseer, + keystore.clone(), + ParaId::from(test_state.chain_ids[0]), + head_b, + peer_a, + HeadData(vec![i]), + ) + .await; + } + + // `peer_a` sends another advertisement and it is ignored + let candidate_hash = CandidateHash(Hash::repeat_byte(0xAA)); + advertise_collation( + &mut virtual_overseer, + peer_a, + head_b, + Some((candidate_hash, Hash::zero())), + ) + .await; + test_helpers::Yield::new().await; + assert_matches!(virtual_overseer.recv().now_or_never(), None); + + // `peer_b` should still be able to advertise its collation + submit_second_and_assert( + &mut virtual_overseer, + keystore.clone(), + ParaId::from(test_state.chain_ids[1]), + head_b, + peer_b, + HeadData(vec![0u8]), + ) + .await; + + // And no more advertisements can be made for this relay parent. 
+ + // verify for peer_a + let candidate_hash = CandidateHash(Hash::repeat_byte(0xBB)); + advertise_collation( + &mut virtual_overseer, + peer_a, + head_b, + Some((candidate_hash, Hash::zero())), + ) + .await; + test_helpers::Yield::new().await; + assert_matches!(virtual_overseer.recv().now_or_never(), None); + + // verify for peer_b + let candidate_hash = CandidateHash(Hash::repeat_byte(0xCC)); + advertise_collation( + &mut virtual_overseer, + peer_b, + head_b, + Some((candidate_hash, Hash::zero())), + ) + .await; + test_helpers::Yield::new().await; + assert_matches!(virtual_overseer.recv().now_or_never(), None); + + virtual_overseer + }); +} + +#[test] +fn collation_fetching_prefer_entries_earlier_in_claim_queue() { + let mut test_state = TestState::with_shared_core(); + + test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { + let TestHarness { mut virtual_overseer, keystore } = test_harness; + + let pair_a = CollatorPair::generate().0; + let collator_a = PeerId::random(); + let para_id_a = test_state.chain_ids[0]; + + let pair_b = CollatorPair::generate().0; + let collator_b = PeerId::random(); + let para_id_b = test_state.chain_ids[1]; + + let head = Hash::from_low_u64_be(128); + let head_num: u32 = 2; + + update_view(&mut virtual_overseer, &mut test_state, vec![(head, head_num)]).await; + + connect_and_declare_collator( + &mut virtual_overseer, + collator_a, + pair_a.clone(), + para_id_a, + CollationVersion::V2, + ) + .await; + + connect_and_declare_collator( + &mut virtual_overseer, + collator_b, + pair_b.clone(), + para_id_b, + CollationVersion::V2, + ) + .await; + + let (candidate_a1, commitments_a1) = + create_dummy_candidate_and_commitments(para_id_a, HeadData(vec![0u8]), head); + let (candidate_b1, commitments_b1) = + create_dummy_candidate_and_commitments(para_id_b, HeadData(vec![1u8]), head); + let (candidate_a2, commitments_a2) = + create_dummy_candidate_and_commitments(para_id_a, HeadData(vec![2u8]), head); + let (candidate_a3, _) = + create_dummy_candidate_and_commitments(para_id_a, HeadData(vec![3u8]), head); + let parent_head_data_a1 = HeadData(vec![0u8]); + let parent_head_data_b1 = HeadData(vec![1u8]); + let parent_head_data_a2 = HeadData(vec![2u8]); + let parent_head_data_a3 = HeadData(vec![3u8]); + + // advertise a collation for `para_id_a` but don't send the collation. This will be a + // pending fetch. + assert_advertise_collation( + &mut virtual_overseer, + collator_a, + head, + para_id_a, + (candidate_a1.hash(), parent_head_data_a1.hash()), + ) + .await; + + let response_channel_a1 = assert_fetch_collation_request( + &mut virtual_overseer, + head, + para_id_a, + Some(candidate_a1.hash()), + ) + .await; + + // advertise another collation for `para_id_a`. This one should be fetched last. + assert_advertise_collation( + &mut virtual_overseer, + collator_a, + head, + para_id_a, + (candidate_a2.hash(), parent_head_data_a2.hash()), + ) + .await; + + // There is a pending collation so nothing should be fetched + test_helpers::Yield::new().await; + assert_matches!(virtual_overseer.recv().now_or_never(), None); + + // Advertise a collation for `para_id_b`. 
This should be fetched second.
+		assert_advertise_collation(
+			&mut virtual_overseer,
+			collator_b,
+			head,
+			para_id_b,
+			(candidate_b1.hash(), parent_head_data_b1.hash()),
+		)
+		.await;
+
+		// Again - no fetch because of the pending collation
+		test_helpers::Yield::new().await;
+		assert_matches!(virtual_overseer.recv().now_or_never(), None);
+
+		// Now send a response for the first fetch and examine the second fetch
+		send_collation_and_assert_processing(
+			&mut virtual_overseer,
+			keystore.clone(),
+			head,
+			para_id_a,
+			collator_a,
+			response_channel_a1,
+			candidate_a1,
+			commitments_a1,
+			PoV { block_data: BlockData(vec![1]) },
+		)
+		.await;
+
+		// The next fetch should be for `para_id_b`
+		let response_channel_b = assert_fetch_collation_request(
+			&mut virtual_overseer,
+			head,
+			para_id_b,
+			Some(candidate_b1.hash()),
+		)
+		.await;
+
+		send_collation_and_assert_processing(
+			&mut virtual_overseer,
+			keystore.clone(),
+			head,
+			para_id_b,
+			collator_b,
+			response_channel_b,
+			candidate_b1,
+			commitments_b1,
+			PoV { block_data: BlockData(vec![2]) },
+		)
+		.await;
+
+		// and the final one for `para_id_a`
+		let response_channel_a2 = assert_fetch_collation_request(
+			&mut virtual_overseer,
+			head,
+			para_id_a,
+			Some(candidate_a2.hash()),
+		)
+		.await;
+
+		// Advertise another collation for `para_id_a`. This should be rejected as there is no slot
+		// in the claim queue for it. One is fetched and one is pending.
+		advertise_collation(
+			&mut virtual_overseer,
+			collator_a,
+			head,
+			Some((candidate_a3.hash(), parent_head_data_a3.hash())),
+		)
+		.await;
+
+		// `CanSecond` shouldn't be sent as the advertisement should be ignored
+		test_helpers::Yield::new().await;
+		assert_matches!(virtual_overseer.recv().now_or_never(), None);
+
+		// Fetch the pending collation
+		send_collation_and_assert_processing(
+			&mut virtual_overseer,
+			keystore.clone(),
+			head,
+			para_id_a,
+			collator_a,
+			response_channel_a2,
+			candidate_a2,
+			commitments_a2,
+			PoV { block_data: BlockData(vec![3]) },
+		)
+		.await;
+
+		virtual_overseer
+	});
+}
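// [Editor's note — illustration only, not part of the patch] The fetch order
// asserted in the test above follows claim-queue positions, assuming
// `TestState::with_shared_core` schedules [para_b, para_a, para_a] on core 0:
// the already-pending fetch for `candidate_a1` completes first, `candidate_b1`
// is then preferred over `candidate_a2` because `para_id_b` sits earlier in
// the claim queue, and `candidate_a3` is dropped once the para's claims are
// exhausted:
//
//     fetch(a1) -> fetch(b1) -> fetch(a2); advertise(a3) is ignored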
+
+#[test]
+fn collation_fetching_considers_advertisements_from_the_whole_view() {
+	let mut test_state = TestState::with_shared_core();
+
+	test_harness(ReputationAggregator::new(|_| true), |test_harness| async move {
+		let TestHarness { mut virtual_overseer, keystore } = test_harness;
+
+		let pair_a = CollatorPair::generate().0;
+		let collator_a = PeerId::random();
+		let para_id_a = test_state.chain_ids[0];
+
+		let pair_b = CollatorPair::generate().0;
+		let collator_b = PeerId::random();
+		let para_id_b = test_state.chain_ids[1];
+
+		let relay_parent_2 = Hash::from_low_u64_be(test_state.relay_parent.to_low_u64_be() - 1);
+
+		assert_eq!(
+			*test_state.claim_queue.get(&CoreIndex(0)).unwrap(),
+			VecDeque::from([para_id_b, para_id_a, para_id_a])
+		);
+
+		update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent_2, 2)]).await;
+
+		connect_and_declare_collator(
+			&mut virtual_overseer,
+			collator_a,
+			pair_a.clone(),
+			para_id_a,
+			CollationVersion::V2,
+		)
+		.await;
+
+		connect_and_declare_collator(
+			&mut virtual_overseer,
+			collator_b,
+			pair_b.clone(),
+			para_id_b,
+			CollationVersion::V2,
+		)
+		.await;
+
+		submit_second_and_assert(
+			&mut virtual_overseer,
+			keystore.clone(),
+			para_id_a,
+			relay_parent_2,
+			collator_a,
+			HeadData(vec![0u8]),
+		)
+		.await;
+
+		submit_second_and_assert(
+			&mut virtual_overseer,
+			keystore.clone(),
+			para_id_b,
+			relay_parent_2,
+			collator_b,
+			HeadData(vec![1u8]),
+		)
+		.await;
+
+		let relay_parent_3 = Hash::from_low_u64_be(relay_parent_2.to_low_u64_be() - 1);
+		*test_state.claim_queue.get_mut(&CoreIndex(0)).unwrap() =
+			VecDeque::from([para_id_a, para_id_a, para_id_b]);
+		update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent_3, 3)]).await;
+
+		submit_second_and_assert(
+			&mut virtual_overseer,
+			keystore.clone(),
+			para_id_b,
+			relay_parent_3,
+			collator_b,
+			HeadData(vec![3u8]),
+		)
+		.await;
+		submit_second_and_assert(
+			&mut virtual_overseer,
+			keystore.clone(),
+			para_id_a,
+			relay_parent_3,
+			collator_a,
+			HeadData(vec![3u8]),
+		)
+		.await;
+
+		// At this point the claim queue is satisfied and any advertisement at `relay_parent_3`
+		// must be ignored
+
+		let (candidate_a, _) =
+			create_dummy_candidate_and_commitments(para_id_a, HeadData(vec![5u8]), relay_parent_3);
+		let parent_head_data_a = HeadData(vec![5u8]);
+
+		advertise_collation(
+			&mut virtual_overseer,
+			collator_a,
+			relay_parent_3,
+			Some((candidate_a.hash(), parent_head_data_a.hash())),
+		)
+		.await;
+
+		test_helpers::Yield::new().await;
+		assert_matches!(virtual_overseer.recv().now_or_never(), None);
+
+		let (candidate_b, _) =
+			create_dummy_candidate_and_commitments(para_id_b, HeadData(vec![6u8]), relay_parent_3);
+		let parent_head_data_b = HeadData(vec![6u8]);
+
+		advertise_collation(
+			&mut virtual_overseer,
+			collator_b,
+			relay_parent_3,
+			Some((candidate_b.hash(), parent_head_data_b.hash())),
+		)
+		.await;
+
+		// `CanSecond` shouldn't be sent as the advertisement should be ignored
+		test_helpers::Yield::new().await;
+		assert_matches!(virtual_overseer.recv().now_or_never(), None);
+
+		// At `relay_parent_6` the advertisement for `para_id_b` falls out of the view so a new one
+		// can be accepted
+		let relay_parent_6 = Hash::from_low_u64_be(relay_parent_3.to_low_u64_be() - 2);
+		update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent_6, 6)]).await;
+
+		submit_second_and_assert(
+			&mut virtual_overseer,
+			keystore.clone(),
+			para_id_a,
+			relay_parent_6,
+			collator_a,
+			HeadData(vec![3u8]),
+		)
+		.await;
+
+		virtual_overseer
+	});
+}
+
+#[test]
+fn collation_fetching_fairness_handles_old_claims() {
+	let mut test_state = TestState::with_shared_core();
+
+	test_harness(ReputationAggregator::new(|_| true), |test_harness| async move {
+		let TestHarness { mut virtual_overseer, keystore } = test_harness;
+
+		let pair_a = CollatorPair::generate().0;
+		let collator_a = PeerId::random();
+		let para_id_a = test_state.chain_ids[0];
+
+		let pair_b = CollatorPair::generate().0;
+		let collator_b = PeerId::random();
+		let para_id_b = test_state.chain_ids[1];
+
+		let relay_parent_2 = Hash::from_low_u64_be(test_state.relay_parent.to_low_u64_be() - 1);
+
+		*test_state.claim_queue.get_mut(&CoreIndex(0)).unwrap() =
+			VecDeque::from([para_id_a, para_id_b, para_id_a]);
+
+		update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent_2, 2)]).await;
+
+		connect_and_declare_collator(
+			&mut virtual_overseer,
+			collator_a,
+			pair_a.clone(),
+			para_id_a,
+			CollationVersion::V2,
+		)
+		.await;
+
+		connect_and_declare_collator(
+			&mut virtual_overseer,
+			collator_b,
+			pair_b.clone(),
+			para_id_b,
+			CollationVersion::V2,
+		)
+		.await;
+
+		submit_second_and_assert(
+			&mut virtual_overseer,
+			keystore.clone(),
+			para_id_a,
+			relay_parent_2,
+			collator_a,
+			HeadData(vec![0u8]),
+		)
+		.await;
+
+		submit_second_and_assert(
+			&mut virtual_overseer,
+			keystore.clone(),
+			para_id_b,
+			relay_parent_2,
+			collator_b,
+			HeadData(vec![1u8]),
+		)
+		.await;
+
+		submit_second_and_assert(
+			&mut virtual_overseer,
+
keystore.clone(), + para_id_a, + relay_parent_2, + collator_a, + HeadData(vec![2u8]), + ) + .await; + + let relay_parent_3 = Hash::from_low_u64_be(relay_parent_2.to_low_u64_be() - 1); + + *test_state.claim_queue.get_mut(&CoreIndex(0)).unwrap() = + VecDeque::from([para_id_b, para_id_a, para_id_b]); + update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent_3, 3)]).await; + + // nothing is advertised here + test_helpers::Yield::new().await; + assert_matches!(virtual_overseer.recv().now_or_never(), None); + + let relay_parent_4 = Hash::from_low_u64_be(relay_parent_3.to_low_u64_be() - 1); + + *test_state.claim_queue.get_mut(&CoreIndex(0)).unwrap() = + VecDeque::from([para_id_a, para_id_b, para_id_a]); + update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent_4, 4)]).await; + + submit_second_and_assert( + &mut virtual_overseer, + keystore.clone(), + para_id_b, + relay_parent_4, + collator_b, + HeadData(vec![3u8]), + ) + .await; + + submit_second_and_assert( + &mut virtual_overseer, + keystore.clone(), + para_id_a, + relay_parent_4, + collator_a, + HeadData(vec![4u8]), + ) + .await; + + // At this point the claim queue is satisfied and any advertisement at `relay_parent_4` + // must be ignored + + // Advertisement for `para_id_a` at `relay_parent_4` which must be ignored + let (candidate_a, _) = + create_dummy_candidate_and_commitments(para_id_a, HeadData(vec![5u8]), relay_parent_4); + let parent_head_data_a = HeadData(vec![5u8]); + + advertise_collation( + &mut virtual_overseer, + collator_a, + relay_parent_4, + Some((candidate_a.hash(), parent_head_data_a.hash())), + ) + .await; + + test_helpers::Yield::new().await; + assert_matches!(virtual_overseer.recv().now_or_never(), None); + + // Advertisement for `para_id_b` at `relay_parent_4` which must be ignored + let (candidate_b, _) = + create_dummy_candidate_and_commitments(para_id_b, HeadData(vec![6u8]), relay_parent_4); + let parent_head_data_b = HeadData(vec![6u8]); + + advertise_collation( + &mut virtual_overseer, + collator_b, + relay_parent_4, + Some((candidate_b.hash(), parent_head_data_b.hash())), + ) + .await; + + // `CanSecond` shouldn't be sent as the advertisement should be ignored + test_helpers::Yield::new().await; + assert_matches!(virtual_overseer.recv().now_or_never(), None); + + virtual_overseer + }); +} + +#[test] +fn claims_below_are_counted_correctly() { + let mut test_state = TestState::with_one_scheduled_para(); + + // Shorten the claim queue to make the test smaller + let mut claim_queue = BTreeMap::new(); + claim_queue.insert( + CoreIndex(0), + VecDeque::from_iter( + [ParaId::from(test_state.chain_ids[0]), ParaId::from(test_state.chain_ids[0])] + .into_iter(), + ), + ); + test_state.claim_queue = claim_queue; + test_state.async_backing_params.max_candidate_depth = 3; + test_state.async_backing_params.allowed_ancestry_len = 2; + + test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { + let TestHarness { mut virtual_overseer, keystore } = test_harness; + + let hash_a = Hash::from_low_u64_be(test_state.relay_parent.to_low_u64_be() - 1); + let hash_b = Hash::from_low_u64_be(hash_a.to_low_u64_be() - 1); + let hash_c = Hash::from_low_u64_be(hash_b.to_low_u64_be() - 1); + + let pair_a = CollatorPair::generate().0; + let collator_a = PeerId::random(); + let para_id_a = test_state.chain_ids[0]; + + update_view(&mut virtual_overseer, &mut test_state, vec![(hash_c, 2)]).await; + + connect_and_declare_collator( + &mut virtual_overseer, + collator_a, + pair_a.clone(), + para_id_a, 
+ CollationVersion::V2, + ) + .await; + + // A collation at hash_a claims the spot at hash_a + submit_second_and_assert( + &mut virtual_overseer, + keystore.clone(), + ParaId::from(test_state.chain_ids[0]), + hash_a, + collator_a, + HeadData(vec![0u8]), + ) + .await; + + // Another collation at hash_a claims the spot at hash_b + submit_second_and_assert( + &mut virtual_overseer, + keystore.clone(), + ParaId::from(test_state.chain_ids[0]), + hash_a, + collator_a, + HeadData(vec![1u8]), + ) + .await; + + // Collation at hash_c claims its own spot + submit_second_and_assert( + &mut virtual_overseer, + keystore.clone(), + ParaId::from(test_state.chain_ids[0]), + hash_c, + collator_a, + HeadData(vec![2u8]), + ) + .await; + + // Collation at hash_b should be ignored because the claim queue is satisfied + let (ignored_candidate, _) = + create_dummy_candidate_and_commitments(para_id_a, HeadData(vec![3u8]), hash_b); + + advertise_collation( + &mut virtual_overseer, + collator_a, + hash_b, + Some((ignored_candidate.hash(), Hash::random())), + ) + .await; + + test_helpers::Yield::new().await; + assert_matches!(virtual_overseer.recv().now_or_never(), None); + + virtual_overseer + }); +} + +#[test] +fn claims_above_are_counted_correctly() { + let mut test_state = TestState::with_one_scheduled_para(); + + // Shorten the claim queue to make the test smaller + let mut claim_queue = BTreeMap::new(); + claim_queue.insert( + CoreIndex(0), + VecDeque::from_iter( + [ParaId::from(test_state.chain_ids[0]), ParaId::from(test_state.chain_ids[0])] + .into_iter(), + ), + ); + test_state.claim_queue = claim_queue; + test_state.async_backing_params.max_candidate_depth = 3; + test_state.async_backing_params.allowed_ancestry_len = 2; + + test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { + let TestHarness { mut virtual_overseer, keystore } = test_harness; + + let hash_a = Hash::from_low_u64_be(test_state.relay_parent.to_low_u64_be() - 1); // block 0 + let hash_b = Hash::from_low_u64_be(hash_a.to_low_u64_be() - 1); // block 1 + let hash_c = Hash::from_low_u64_be(hash_b.to_low_u64_be() - 1); // block 2 + + let pair_a = CollatorPair::generate().0; + let collator_a = PeerId::random(); + let para_id_a = test_state.chain_ids[0]; + + update_view(&mut virtual_overseer, &mut test_state, vec![(hash_c, 2)]).await; + + connect_and_declare_collator( + &mut virtual_overseer, + collator_a, + pair_a.clone(), + para_id_a, + CollationVersion::V2, + ) + .await; + + // A collation at hash_b claims the spot at hash_b + submit_second_and_assert( + &mut virtual_overseer, + keystore.clone(), + ParaId::from(test_state.chain_ids[0]), + hash_b, + collator_a, + HeadData(vec![0u8]), + ) + .await; + + // Another collation at hash_b claims the spot at hash_c + submit_second_and_assert( + &mut virtual_overseer, + keystore.clone(), + ParaId::from(test_state.chain_ids[0]), + hash_b, + collator_a, + HeadData(vec![1u8]), + ) + .await; + + // Collation at hash_a claims its own spot + submit_second_and_assert( + &mut virtual_overseer, + keystore.clone(), + ParaId::from(test_state.chain_ids[0]), + hash_a, + collator_a, + HeadData(vec![0u8]), + ) + .await; + + // Another Collation at hash_a should be ignored because the claim queue is satisfied + let (ignored_candidate, _) = + create_dummy_candidate_and_commitments(para_id_a, HeadData(vec![2u8]), hash_a); + + advertise_collation( + &mut virtual_overseer, + collator_a, + hash_a, + Some((ignored_candidate.hash(), Hash::random())), + ) + .await; + + test_helpers::Yield::new().await; + 
assert_matches!(virtual_overseer.recv().now_or_never(), None); + + // Same for hash_b + let (ignored_candidate, _) = + create_dummy_candidate_and_commitments(para_id_a, HeadData(vec![3u8]), hash_b); + + advertise_collation( + &mut virtual_overseer, + collator_a, + hash_b, + Some((ignored_candidate.hash(), Hash::random())), + ) + .await; + + test_helpers::Yield::new().await; + assert_matches!(virtual_overseer.recv().now_or_never(), None); + + virtual_overseer + }); +} + +#[test] +fn claim_fills_last_free_slot() { + let mut test_state = TestState::with_one_scheduled_para(); + + // Shorten the claim queue to make the test smaller + let mut claim_queue = BTreeMap::new(); + claim_queue.insert( + CoreIndex(0), + VecDeque::from_iter( + [ParaId::from(test_state.chain_ids[0]), ParaId::from(test_state.chain_ids[0])] + .into_iter(), + ), + ); + test_state.claim_queue = claim_queue; + test_state.async_backing_params.max_candidate_depth = 3; + test_state.async_backing_params.allowed_ancestry_len = 2; + + test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { + let TestHarness { mut virtual_overseer, keystore } = test_harness; + + let hash_a = Hash::from_low_u64_be(test_state.relay_parent.to_low_u64_be() - 1); // block 0 + let hash_b = Hash::from_low_u64_be(hash_a.to_low_u64_be() - 1); // block 1 + let hash_c = Hash::from_low_u64_be(hash_b.to_low_u64_be() - 1); // block 2 + + let pair_a = CollatorPair::generate().0; + let collator_a = PeerId::random(); + let para_id_a = test_state.chain_ids[0]; + + update_view(&mut virtual_overseer, &mut test_state, vec![(hash_c, 2)]).await; + + connect_and_declare_collator( + &mut virtual_overseer, + collator_a, + pair_a.clone(), + para_id_a, + CollationVersion::V2, + ) + .await; + + // A collation at hash_a claims its spot + submit_second_and_assert( + &mut virtual_overseer, + keystore.clone(), + ParaId::from(test_state.chain_ids[0]), + hash_a, + collator_a, + HeadData(vec![0u8]), + ) + .await; + + // Collation at hash_b claims its own spot + submit_second_and_assert( + &mut virtual_overseer, + keystore.clone(), + ParaId::from(test_state.chain_ids[0]), + hash_b, + collator_a, + HeadData(vec![3u8]), + ) + .await; + + // Collation at hash_c claims its own spot + submit_second_and_assert( + &mut virtual_overseer, + keystore.clone(), + ParaId::from(test_state.chain_ids[0]), + hash_c, + collator_a, + HeadData(vec![2u8]), + ) + .await; + + // Another Collation at hash_a should be ignored because the claim queue is satisfied + let (ignored_candidate, _) = + create_dummy_candidate_and_commitments(para_id_a, HeadData(vec![3u8]), hash_a); + + advertise_collation( + &mut virtual_overseer, + collator_a, + hash_a, + Some((ignored_candidate.hash(), Hash::random())), + ) + .await; + + test_helpers::Yield::new().await; + assert_matches!(virtual_overseer.recv().now_or_never(), None); + + // Same for hash_b + let (ignored_candidate, _) = + create_dummy_candidate_and_commitments(para_id_a, HeadData(vec![4u8]), hash_b); + + advertise_collation( + &mut virtual_overseer, + collator_a, + hash_b, + Some((ignored_candidate.hash(), Hash::random())), + ) + .await; + + test_helpers::Yield::new().await; + assert_matches!(virtual_overseer.recv().now_or_never(), None); + + virtual_overseer + }); +} diff --git a/polkadot/node/network/dispute-distribution/Cargo.toml b/polkadot/node/network/dispute-distribution/Cargo.toml index b4dcafe09eb6..4f2f9ccadf8b 100644 --- a/polkadot/node/network/dispute-distribution/Cargo.toml +++ b/polkadot/node/network/dispute-distribution/Cargo.toml 
@@ -5,6 +5,8 @@ description = "Polkadot Dispute Distribution subsystem, which ensures all concer authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/network/gossip-support/Cargo.toml b/polkadot/node/network/gossip-support/Cargo.toml index c8c19e5de070..7d17ea45eab9 100644 --- a/polkadot/node/network/gossip-support/Cargo.toml +++ b/polkadot/node/network/gossip-support/Cargo.toml @@ -5,6 +5,8 @@ description = "Polkadot Gossip Support subsystem. Responsible for keeping track authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/network/protocol/Cargo.toml b/polkadot/node/network/protocol/Cargo.toml index 3d51d3c0a565..0bcf224332bc 100644 --- a/polkadot/node/network/protocol/Cargo.toml +++ b/polkadot/node/network/protocol/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Primitives types for the Node-side" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/network/protocol/src/grid_topology.rs b/polkadot/node/network/protocol/src/grid_topology.rs index 4dd7d29fc25c..f4c1a07ba3c2 100644 --- a/polkadot/node/network/protocol/src/grid_topology.rs +++ b/polkadot/node/network/protocol/src/grid_topology.rs @@ -575,6 +575,22 @@ impl RequiredRouting { _ => false, } } + + /// Combine two required routing sets into one that would cover both routing modes. + pub fn combine(self, other: Self) -> Self { + match (self, other) { + (RequiredRouting::All, _) | (_, RequiredRouting::All) => RequiredRouting::All, + (RequiredRouting::GridXY, _) | (_, RequiredRouting::GridXY) => RequiredRouting::GridXY, + (RequiredRouting::GridX, RequiredRouting::GridY) | + (RequiredRouting::GridY, RequiredRouting::GridX) => RequiredRouting::GridXY, + (RequiredRouting::GridX, RequiredRouting::GridX) => RequiredRouting::GridX, + (RequiredRouting::GridY, RequiredRouting::GridY) => RequiredRouting::GridY, + (RequiredRouting::None, RequiredRouting::PendingTopology) | + (RequiredRouting::PendingTopology, RequiredRouting::None) => RequiredRouting::PendingTopology, + (RequiredRouting::None, _) | (RequiredRouting::PendingTopology, _) => other, + (_, RequiredRouting::None) | (_, RequiredRouting::PendingTopology) => self, + } + } } #[cfg(test)] @@ -587,6 +603,50 @@ mod tests { rand_chacha::ChaCha12Rng::seed_from_u64(12345) } + #[test] + fn test_required_routing_combine() { + assert_eq!(RequiredRouting::All.combine(RequiredRouting::None), RequiredRouting::All); + assert_eq!(RequiredRouting::All.combine(RequiredRouting::GridXY), RequiredRouting::All); + assert_eq!(RequiredRouting::GridXY.combine(RequiredRouting::All), RequiredRouting::All); + assert_eq!(RequiredRouting::None.combine(RequiredRouting::All), RequiredRouting::All); + assert_eq!(RequiredRouting::None.combine(RequiredRouting::None), RequiredRouting::None); + assert_eq!( + RequiredRouting::PendingTopology.combine(RequiredRouting::GridX), + RequiredRouting::GridX + ); + + assert_eq!( + RequiredRouting::GridX.combine(RequiredRouting::PendingTopology), + RequiredRouting::GridX + ); + assert_eq!(RequiredRouting::GridX.combine(RequiredRouting::GridY), RequiredRouting::GridXY); + assert_eq!(RequiredRouting::GridY.combine(RequiredRouting::GridX), RequiredRouting::GridXY); + assert_eq!( + 
RequiredRouting::GridXY.combine(RequiredRouting::GridXY), + RequiredRouting::GridXY + ); + assert_eq!(RequiredRouting::GridX.combine(RequiredRouting::GridX), RequiredRouting::GridX); + assert_eq!(RequiredRouting::GridY.combine(RequiredRouting::GridY), RequiredRouting::GridY); + + assert_eq!(RequiredRouting::None.combine(RequiredRouting::GridY), RequiredRouting::GridY); + assert_eq!(RequiredRouting::None.combine(RequiredRouting::GridX), RequiredRouting::GridX); + assert_eq!(RequiredRouting::None.combine(RequiredRouting::GridXY), RequiredRouting::GridXY); + + assert_eq!(RequiredRouting::GridY.combine(RequiredRouting::None), RequiredRouting::GridY); + assert_eq!(RequiredRouting::GridX.combine(RequiredRouting::None), RequiredRouting::GridX); + assert_eq!(RequiredRouting::GridXY.combine(RequiredRouting::None), RequiredRouting::GridXY); + + assert_eq!( + RequiredRouting::PendingTopology.combine(RequiredRouting::None), + RequiredRouting::PendingTopology + ); + + assert_eq!( + RequiredRouting::None.combine(RequiredRouting::PendingTopology), + RequiredRouting::PendingTopology + ); + } + #[test] fn test_random_routing_sample() { // This test is fragile as it relies on a specific ChaCha12Rng diff --git a/polkadot/node/network/statement-distribution/Cargo.toml b/polkadot/node/network/statement-distribution/Cargo.toml index de07937ffb0a..d737c7bf8968 100644 --- a/polkadot/node/network/statement-distribution/Cargo.toml +++ b/polkadot/node/network/statement-distribution/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/overseer/Cargo.toml b/polkadot/node/overseer/Cargo.toml index 2253a5ae0c66..62634c1da090 100644 --- a/polkadot/node/overseer/Cargo.toml +++ b/polkadot/node/overseer/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "System overseer of the Polkadot node" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/primitives/Cargo.toml b/polkadot/node/primitives/Cargo.toml index 7185205f905b..50ee3a80ddb8 100644 --- a/polkadot/node/primitives/Cargo.toml +++ b/polkadot/node/primitives/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml index 6e8eade21a43..c1e06dd830b5 100644 --- a/polkadot/node/service/Cargo.toml +++ b/polkadot/node/service/Cargo.toml @@ -6,6 +6,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Utils to tie different Polkadot components together and allow instantiation of a node." 
+homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -208,6 +210,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "westend-runtime?/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ "frame-system/try-runtime", diff --git a/polkadot/node/service/src/overseer.rs b/polkadot/node/service/src/overseer.rs index 279b6ff80704..e4ea6efeaac2 100644 --- a/polkadot/node/service/src/overseer.rs +++ b/polkadot/node/service/src/overseer.rs @@ -210,7 +210,7 @@ pub fn validator_overseer_builder( AuthorityDiscoveryService, >, ChainApiSubsystem, - CollationGenerationSubsystem, + DummySubsystem, CollatorProtocolSubsystem, ApprovalDistributionSubsystem, ApprovalVotingSubsystem, @@ -237,6 +237,7 @@ where let network_bridge_metrics: NetworkBridgeMetrics = Metrics::register(registry)?; let approval_voting_parallel_metrics: ApprovalVotingParallelMetrics = Metrics::register(registry)?; + let builder = Overseer::builder() .network_bridge_tx(NetworkBridgeTxSubsystem::new( network_service.clone(), @@ -295,7 +296,7 @@ where )) .pvf_checker(PvfCheckerSubsystem::new(keystore.clone(), Metrics::register(registry)?)) .chain_api(ChainApiSubsystem::new(runtime_client.clone(), Metrics::register(registry)?)) - .collation_generation(CollationGenerationSubsystem::new(Metrics::register(registry)?)) + .collation_generation(DummySubsystem) .collator_protocol({ let side = match is_parachain_node { IsParachainNode::Collator(_) | IsParachainNode::FullNode => @@ -434,7 +435,7 @@ pub fn validator_with_parallel_overseer_builder( AuthorityDiscoveryService, >, ChainApiSubsystem, - CollationGenerationSubsystem, + DummySubsystem, CollatorProtocolSubsystem, DummySubsystem, DummySubsystem, @@ -519,7 +520,7 @@ where )) .pvf_checker(PvfCheckerSubsystem::new(keystore.clone(), Metrics::register(registry)?)) .chain_api(ChainApiSubsystem::new(runtime_client.clone(), Metrics::register(registry)?)) - .collation_generation(CollationGenerationSubsystem::new(Metrics::register(registry)?)) + .collation_generation(DummySubsystem) .collator_protocol({ let side = match is_parachain_node { IsParachainNode::Collator(_) | IsParachainNode::FullNode => diff --git a/polkadot/node/subsystem-types/Cargo.toml b/polkadot/node/subsystem-types/Cargo.toml index b5686ec96be1..44bb7036d63d 100644 --- a/polkadot/node/subsystem-types/Cargo.toml +++ b/polkadot/node/subsystem-types/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs index 28a3a1ab82ab..b541f9519219 100644 --- a/polkadot/node/subsystem-types/src/messages.rs +++ b/polkadot/node/subsystem-types/src/messages.rs @@ -48,12 +48,12 @@ use polkadot_primitives::{ CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState, }, ApprovalVotingParams, AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateHash, - CandidateIndex, CollatorId, CoreIndex, DisputeState, ExecutorParams, GroupIndex, - GroupRotationInfo, Hash, HeadData, Header as BlockHeader, Id as ParaId, InboundDownwardMessage, - InboundHrmpMessage, MultiDisputeStatementSet, NodeFeatures, OccupiedCoreAssumption, - PersistedValidationData, PvfCheckStatement, PvfExecKind as RuntimePvfExecKind, SessionIndex, - SessionInfo, SignedAvailabilityBitfield, SignedAvailabilityBitfields, ValidationCode, - 
ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature,
+	CandidateIndex, CoreIndex, DisputeState, ExecutorParams, GroupIndex, GroupRotationInfo, Hash,
+	HeadData, Header as BlockHeader, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage,
+	MultiDisputeStatementSet, NodeFeatures, OccupiedCoreAssumption, PersistedValidationData,
+	PvfCheckStatement, PvfExecKind as RuntimePvfExecKind, SessionIndex, SessionInfo,
+	SignedAvailabilityBitfield, SignedAvailabilityBitfields, ValidationCode, ValidationCodeHash,
+	ValidatorId, ValidatorIndex, ValidatorSignature,
 };
 use polkadot_statement_table::v2::Misbehavior;
 use std::{
@@ -250,9 +250,6 @@ pub enum CollatorProtocolMessage {
 		/// The core index where the candidate should be backed.
 		core_index: CoreIndex,
 	},
-	/// Report a collator as having provided an invalid collation. This should lead to disconnect
-	/// and blacklist of the collator.
-	ReportCollator(CollatorId),
 	/// Get a network bridge update.
 	#[from]
 	NetworkBridgeUpdate(NetworkBridgeEvent<net_protocol::CollatorProtocolMessage>),
diff --git a/polkadot/node/subsystem-util/Cargo.toml b/polkadot/node/subsystem-util/Cargo.toml
index d12daa572055..9c21fede1c47 100644
--- a/polkadot/node/subsystem-util/Cargo.toml
+++ b/polkadot/node/subsystem-util/Cargo.toml
@@ -5,6 +5,8 @@ version = "7.0.0"
 authors.workspace = true
 edition.workspace = true
 license.workspace = true
+homepage.workspace = true
+repository.workspace = true
 
 [lints]
 workspace = true
diff --git a/polkadot/node/subsystem-util/src/backing_implicit_view.rs b/polkadot/node/subsystem-util/src/backing_implicit_view.rs
index 6f2191e7add2..67f5dad518e1 100644
--- a/polkadot/node/subsystem-util/src/backing_implicit_view.rs
+++ b/polkadot/node/subsystem-util/src/backing_implicit_view.rs
@@ -22,12 +22,13 @@ use polkadot_node_subsystem::{
 };
 use polkadot_primitives::{AsyncBackingParams, BlockNumber, Hash, Id as ParaId};
 
-use std::collections::HashMap;
+use std::collections::{HashMap, HashSet};
 
 use crate::{
 	inclusion_emulator::RelayChainBlockInfo,
 	request_async_backing_params, request_session_index_for_child,
 	runtime::{self, recv_runtime},
+	LOG_TARGET,
 };
 
 // Always aim to retain 1 block before the active leaves.
@@ -173,13 +174,7 @@ impl View {
 			return Err(FetchError::AlreadyKnown)
 		}
 
-		let res = fetch_fresh_leaf_and_insert_ancestry(
-			leaf_hash,
-			&mut self.block_info_storage,
-			&mut *sender,
-			self.collating_for,
-		)
-		.await;
+		let res = self.fetch_fresh_leaf_and_insert_ancestry(leaf_hash, &mut *sender).await;
 
 		match res {
 			Ok(fetched) => {
@@ -323,6 +318,205 @@ impl View {
 			.as_ref()
 			.map(|mins| mins.allowed_relay_parents_for(para_id, block_info.block_number))
 	}
+
+	/// Returns all paths from each leaf to the last block in state containing `relay_parent`. If no
+	/// paths exist the function will return an empty `Vec`.
+	pub fn paths_via_relay_parent(&self, relay_parent: &Hash) -> Vec<Vec<Hash>> {
+		gum::trace!(
+			target: LOG_TARGET,
+			?relay_parent,
+			leaves=?self.leaves,
+			block_info_storage=?self.block_info_storage,
+			"Finding paths via relay parent"
+		);
+
+		if self.leaves.is_empty() {
+			// No leaves so the view should be empty. Don't return any paths.
+			return vec![]
+		};
+
+		if !self.block_info_storage.contains_key(relay_parent) {
+			// `relay_parent` is not in the view - don't return any paths
+			return vec![]
+		}
+
+		// Find all paths from each leaf to `relay_parent`.
+        let mut paths = Vec::new();
+        for (leaf, _) in &self.leaves {
+            let mut path = Vec::new();
+            let mut current_leaf = *leaf;
+            let mut visited = HashSet::new();
+            let mut path_contains_target = false;
+
+            // Start from the leaf and traverse all known blocks
+            loop {
+                if visited.contains(&current_leaf) {
+                    // There is a cycle - abandon this path
+                    break
+                }
+
+                current_leaf = match self.block_info_storage.get(&current_leaf) {
+                    Some(info) => {
+                        // `current_leaf` is a known block - add it to the path and mark it as
+                        // visited
+                        path.push(current_leaf);
+                        visited.insert(current_leaf);
+
+                        // `current_leaf` is the target `relay_parent`. Mark the path so that it's
+                        // included in the result
+                        if current_leaf == *relay_parent {
+                            path_contains_target = true;
+                        }
+
+                        // update `current_leaf` with the parent
+                        info.parent_hash
+                    },
+                    None => {
+                        // path is complete
+                        if path_contains_target {
+                            // we want the path ordered from oldest to newest so reverse it
+                            paths.push(path.into_iter().rev().collect());
+                        }
+                        break
+                    },
+                };
+            }
+        }
+
+        paths
+    }
+
+    async fn fetch_fresh_leaf_and_insert_ancestry<Sender>(
+        &mut self,
+        leaf_hash: Hash,
+        sender: &mut Sender,
+    ) -> Result<FetchSummary, FetchError>
+    where
+        Sender: SubsystemSender<ProspectiveParachainsMessage>
+            + SubsystemSender<RuntimeApiMessage>
+            + SubsystemSender<ChainApiMessage>,
+    {
+        let leaf_header = {
+            let (tx, rx) = oneshot::channel();
+            sender.send_message(ChainApiMessage::BlockHeader(leaf_hash, tx)).await;
+
+            match rx.await {
+                Ok(Ok(Some(header))) => header,
+                Ok(Ok(None)) =>
+                    return Err(FetchError::BlockHeaderUnavailable(
+                        leaf_hash,
+                        BlockHeaderUnavailableReason::Unknown,
+                    )),
+                Ok(Err(e)) =>
+                    return Err(FetchError::BlockHeaderUnavailable(
+                        leaf_hash,
+                        BlockHeaderUnavailableReason::Internal(e),
+                    )),
+                Err(_) =>
+                    return Err(FetchError::BlockHeaderUnavailable(
+                        leaf_hash,
+                        BlockHeaderUnavailableReason::SubsystemUnavailable,
+                    )),
+            }
+        };
+
+        // If the node is a collator, bypass prospective-parachains. We're only interested in the
+        // one paraid and the subsystem is not present.
+        let min_relay_parents = if let Some(para_id) = self.collating_for {
+            fetch_min_relay_parents_for_collator(leaf_hash, leaf_header.number, sender)
+                .await?
+                .map(|x| vec![(para_id, x)])
+                .unwrap_or_default()
+        } else {
+            fetch_min_relay_parents_from_prospective_parachains(leaf_hash, sender).await?
+        };
+
+        let min_min = min_relay_parents.iter().map(|x| x.1).min().unwrap_or(leaf_header.number);
+        let expected_ancestry_len = (leaf_header.number.saturating_sub(min_min) as usize) + 1;
+
+        let ancestry = if leaf_header.number > 0 {
+            let mut next_ancestor_number = leaf_header.number - 1;
+            let mut next_ancestor_hash = leaf_header.parent_hash;
+
+            let mut ancestry = Vec::with_capacity(expected_ancestry_len);
+            ancestry.push(leaf_hash);
+
+            // Ensure all ancestors up to and including `min_min` are in the
+            // block storage. When views advance incrementally, everything
+            // should already be present.
+            while next_ancestor_number >= min_min {
+                let parent_hash = if let Some(info) =
+                    self.block_info_storage.get(&next_ancestor_hash)
+                {
+                    info.parent_hash
+                } else {
+                    // load the header and insert into block storage.
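(Editorial worked example with hypothetical numbers: for a leaf at height 100 with min_min = 95, expected_ancestry_len = (100 - 95) + 1 = 6; the loop walks parents 99, 98, ..., 95, and only ancestors missing from `block_info_storage` take the ChainApi header fetch below.)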
+ let (tx, rx) = oneshot::channel(); + sender.send_message(ChainApiMessage::BlockHeader(next_ancestor_hash, tx)).await; + + let header = match rx.await { + Ok(Ok(Some(header))) => header, + Ok(Ok(None)) => + return Err(FetchError::BlockHeaderUnavailable( + next_ancestor_hash, + BlockHeaderUnavailableReason::Unknown, + )), + Ok(Err(e)) => + return Err(FetchError::BlockHeaderUnavailable( + next_ancestor_hash, + BlockHeaderUnavailableReason::Internal(e), + )), + Err(_) => + return Err(FetchError::BlockHeaderUnavailable( + next_ancestor_hash, + BlockHeaderUnavailableReason::SubsystemUnavailable, + )), + }; + + self.block_info_storage.insert( + next_ancestor_hash, + BlockInfo { + block_number: next_ancestor_number, + parent_hash: header.parent_hash, + maybe_allowed_relay_parents: None, + }, + ); + + header.parent_hash + }; + + ancestry.push(next_ancestor_hash); + if next_ancestor_number == 0 { + break + } + + next_ancestor_number -= 1; + next_ancestor_hash = parent_hash; + } + + ancestry + } else { + vec![leaf_hash] + }; + + let fetched_ancestry = + FetchSummary { minimum_ancestor_number: min_min, leaf_number: leaf_header.number }; + + let allowed_relay_parents = AllowedRelayParents { + minimum_relay_parents: min_relay_parents.into_iter().collect(), + allowed_relay_parents_contiguous: ancestry, + }; + + let leaf_block_info = BlockInfo { + parent_hash: leaf_header.parent_hash, + block_number: leaf_header.number, + maybe_allowed_relay_parents: Some(allowed_relay_parents), + }; + + self.block_info_storage.insert(leaf_hash, leaf_block_info); + + Ok(fetched_ancestry) + } } /// Errors when fetching a leaf and associated ancestry. @@ -437,137 +631,6 @@ where Ok(Some(min)) } -async fn fetch_fresh_leaf_and_insert_ancestry( - leaf_hash: Hash, - block_info_storage: &mut HashMap, - sender: &mut Sender, - collating_for: Option, -) -> Result -where - Sender: SubsystemSender - + SubsystemSender - + SubsystemSender, -{ - let leaf_header = { - let (tx, rx) = oneshot::channel(); - sender.send_message(ChainApiMessage::BlockHeader(leaf_hash, tx)).await; - - match rx.await { - Ok(Ok(Some(header))) => header, - Ok(Ok(None)) => - return Err(FetchError::BlockHeaderUnavailable( - leaf_hash, - BlockHeaderUnavailableReason::Unknown, - )), - Ok(Err(e)) => - return Err(FetchError::BlockHeaderUnavailable( - leaf_hash, - BlockHeaderUnavailableReason::Internal(e), - )), - Err(_) => - return Err(FetchError::BlockHeaderUnavailable( - leaf_hash, - BlockHeaderUnavailableReason::SubsystemUnavailable, - )), - } - }; - - // If the node is a collator, bypass prospective-parachains. We're only interested in the one - // paraid and the subsystem is not present. - let min_relay_parents = if let Some(para_id) = collating_for { - fetch_min_relay_parents_for_collator(leaf_hash, leaf_header.number, sender) - .await? - .map(|x| vec![(para_id, x)]) - .unwrap_or_default() - } else { - fetch_min_relay_parents_from_prospective_parachains(leaf_hash, sender).await? - }; - - let min_min = min_relay_parents.iter().map(|x| x.1).min().unwrap_or(leaf_header.number); - let expected_ancestry_len = (leaf_header.number.saturating_sub(min_min) as usize) + 1; - - let ancestry = if leaf_header.number > 0 { - let mut next_ancestor_number = leaf_header.number - 1; - let mut next_ancestor_hash = leaf_header.parent_hash; - - let mut ancestry = Vec::with_capacity(expected_ancestry_len); - ancestry.push(leaf_hash); - - // Ensure all ancestors up to and including `min_min` are in the - // block storage. 
When views advance incrementally, everything - // should already be present. - while next_ancestor_number >= min_min { - let parent_hash = if let Some(info) = block_info_storage.get(&next_ancestor_hash) { - info.parent_hash - } else { - // load the header and insert into block storage. - let (tx, rx) = oneshot::channel(); - sender.send_message(ChainApiMessage::BlockHeader(next_ancestor_hash, tx)).await; - - let header = match rx.await { - Ok(Ok(Some(header))) => header, - Ok(Ok(None)) => - return Err(FetchError::BlockHeaderUnavailable( - next_ancestor_hash, - BlockHeaderUnavailableReason::Unknown, - )), - Ok(Err(e)) => - return Err(FetchError::BlockHeaderUnavailable( - next_ancestor_hash, - BlockHeaderUnavailableReason::Internal(e), - )), - Err(_) => - return Err(FetchError::BlockHeaderUnavailable( - next_ancestor_hash, - BlockHeaderUnavailableReason::SubsystemUnavailable, - )), - }; - - block_info_storage.insert( - next_ancestor_hash, - BlockInfo { - block_number: next_ancestor_number, - parent_hash: header.parent_hash, - maybe_allowed_relay_parents: None, - }, - ); - - header.parent_hash - }; - - ancestry.push(next_ancestor_hash); - if next_ancestor_number == 0 { - break - } - - next_ancestor_number -= 1; - next_ancestor_hash = parent_hash; - } - - ancestry - } else { - vec![leaf_hash] - }; - - let fetched_ancestry = - FetchSummary { minimum_ancestor_number: min_min, leaf_number: leaf_header.number }; - - let allowed_relay_parents = AllowedRelayParents { - minimum_relay_parents: min_relay_parents.into_iter().collect(), - allowed_relay_parents_contiguous: ancestry, - }; - - let leaf_block_info = BlockInfo { - parent_hash: leaf_header.parent_hash, - block_number: leaf_header.number, - maybe_allowed_relay_parents: Some(allowed_relay_parents), - }; - - block_info_storage.insert(leaf_hash, leaf_block_info); - - Ok(fetched_ancestry) -} - #[cfg(test)] mod tests { use super::*; @@ -798,6 +861,23 @@ mod tests { assert_eq!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_A)), Some(&expected_ancestry[..(PARA_A_MIN_PARENT - 1) as usize])); assert_eq!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_B)), Some(&expected_ancestry[..])); assert!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_C)).unwrap().is_empty()); + + assert_eq!(view.leaves.len(), 1); + assert!(view.leaves.contains_key(leaf)); + assert!(view.paths_via_relay_parent(&CHAIN_B[0]).is_empty()); + assert!(view.paths_via_relay_parent(&CHAIN_A[0]).is_empty()); + assert_eq!( + view.paths_via_relay_parent(&CHAIN_B[min_min_idx]), + vec![CHAIN_B[min_min_idx..].to_vec()] + ); + assert_eq!( + view.paths_via_relay_parent(&CHAIN_B[min_min_idx + 1]), + vec![CHAIN_B[min_min_idx..].to_vec()] + ); + assert_eq!( + view.paths_via_relay_parent(&leaf), + vec![CHAIN_B[min_min_idx..].to_vec()] + ); } ); @@ -918,6 +998,12 @@ mod tests { assert!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_B)).unwrap().is_empty()); assert!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_C)).unwrap().is_empty()); + + assert!(view.paths_via_relay_parent(&CHAIN_A[0]).is_empty()); + assert_eq!( + view.paths_via_relay_parent(&CHAIN_B[min_min_idx]), + vec![CHAIN_B[min_min_idx..].to_vec()] + ); } ); @@ -986,6 +1072,12 @@ mod tests { assert!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_B)).unwrap().is_empty()); assert!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_C)).unwrap().is_empty()); + + assert!(view.paths_via_relay_parent(&GENESIS_HASH).is_empty()); + assert_eq!( + view.paths_via_relay_parent(&CHAIN_A[0]), + 
vec![CHAIN_A.to_vec()] + ); } ); } @@ -1160,4 +1252,69 @@ mod tests { Some(hashes) if hashes == &[GENESIS_HASH] ); } + + #[test] + fn path_with_fork() { + let pool = TaskExecutor::new(); + let (mut ctx, mut ctx_handle) = make_subsystem_context::(pool); + + let mut view = View::default(); + + assert_eq!(view.collating_for, None); + + // Chain A + let prospective_response = vec![(PARA_A, 0)]; // was PARA_A_MIN_PARENT + let leaf = CHAIN_A.last().unwrap(); + let blocks = [&[GENESIS_HASH], CHAIN_A].concat(); + let leaf_idx = blocks.len() - 1; + + let fut = view.activate_leaf(ctx.sender(), *leaf).timeout(TIMEOUT).map(|res| { + res.expect("`activate_leaf` timed out").unwrap(); + }); + let overseer_fut = async { + assert_block_header_requests(&mut ctx_handle, CHAIN_A, &blocks[leaf_idx..]).await; + assert_min_relay_parents_request(&mut ctx_handle, leaf, prospective_response).await; + assert_block_header_requests(&mut ctx_handle, CHAIN_A, &blocks[..leaf_idx]).await; + }; + futures::executor::block_on(join(fut, overseer_fut)); + + // Chain B + let prospective_response = vec![(PARA_A, 1)]; + + let leaf = CHAIN_B.last().unwrap(); + let leaf_idx = CHAIN_B.len() - 1; + + let fut = view.activate_leaf(ctx.sender(), *leaf).timeout(TIMEOUT).map(|res| { + res.expect("`activate_leaf` timed out").unwrap(); + }); + let overseer_fut = async { + assert_block_header_requests(&mut ctx_handle, CHAIN_B, &CHAIN_B[leaf_idx..]).await; + assert_min_relay_parents_request(&mut ctx_handle, leaf, prospective_response).await; + assert_block_header_requests(&mut ctx_handle, CHAIN_B, &CHAIN_B[0..leaf_idx]).await; + }; + futures::executor::block_on(join(fut, overseer_fut)); + + assert_eq!(view.leaves.len(), 2); + + let mut paths_to_genesis = view.paths_via_relay_parent(&GENESIS_HASH); + paths_to_genesis.sort(); + let mut expected_paths_to_genesis = vec![ + [GENESIS_HASH].iter().chain(CHAIN_A.iter()).copied().collect::>(), + [GENESIS_HASH].iter().chain(CHAIN_B.iter()).copied().collect::>(), + ]; + expected_paths_to_genesis.sort(); + assert_eq!(paths_to_genesis, expected_paths_to_genesis); + + let path_to_leaf_in_a = view.paths_via_relay_parent(&CHAIN_A[1]); + let expected_path_to_leaf_in_a = + vec![[GENESIS_HASH].iter().chain(CHAIN_A.iter()).copied().collect::>()]; + assert_eq!(path_to_leaf_in_a, expected_path_to_leaf_in_a); + + let path_to_leaf_in_b = view.paths_via_relay_parent(&CHAIN_B[4]); + let expected_path_to_leaf_in_b = + vec![[GENESIS_HASH].iter().chain(CHAIN_B.iter()).copied().collect::>()]; + assert_eq!(path_to_leaf_in_b, expected_path_to_leaf_in_b); + + assert_eq!(view.paths_via_relay_parent(&Hash::repeat_byte(0x0A)), Vec::>::new()); + } } diff --git a/polkadot/node/subsystem/Cargo.toml b/polkadot/node/subsystem/Cargo.toml index ce4bceec7336..4f30d3ce9c09 100644 --- a/polkadot/node/subsystem/Cargo.toml +++ b/polkadot/node/subsystem/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/test/service/src/lib.rs b/polkadot/node/test/service/src/lib.rs index 6e09bb9e4310..f34bb62a7cf0 100644 --- a/polkadot/node/test/service/src/lib.rs +++ b/polkadot/node/test/service/src/lib.rs @@ -451,8 +451,8 @@ pub fn construct_extrinsic( /// Construct a transfer extrinsic. 
pub fn construct_transfer_extrinsic( client: &Client, - origin: sp_keyring::AccountKeyring, - dest: sp_keyring::AccountKeyring, + origin: sp_keyring::Sr25519Keyring, + dest: sp_keyring::Sr25519Keyring, value: Balance, ) -> UncheckedExtrinsic { let function = diff --git a/polkadot/node/tracking-allocator/Cargo.toml b/polkadot/node/tracking-allocator/Cargo.toml index d98377e53759..0fbf526ccb8b 100644 --- a/polkadot/node/tracking-allocator/Cargo.toml +++ b/polkadot/node/tracking-allocator/Cargo.toml @@ -5,6 +5,8 @@ version = "2.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/parachain/Cargo.toml b/polkadot/parachain/Cargo.toml index 9d0518fd46ad..ea6c4423dc19 100644 --- a/polkadot/parachain/Cargo.toml +++ b/polkadot/parachain/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true version = "6.0.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/parachain/src/primitives.rs b/polkadot/parachain/src/primitives.rs index c5757928c3fc..1f2f9e2e9cdc 100644 --- a/polkadot/parachain/src/primitives.rs +++ b/polkadot/parachain/src/primitives.rs @@ -57,6 +57,8 @@ impl HeadData { } } +impl codec::EncodeLike for alloc::vec::Vec {} + /// Parachain validation code. #[derive( PartialEq, @@ -154,6 +156,9 @@ pub struct BlockData(#[cfg_attr(feature = "std", serde(with = "bytes"))] pub Vec #[cfg_attr(feature = "std", derive(derive_more::Display))] pub struct Id(u32); +impl codec::EncodeLike for Id {} +impl codec::EncodeLike for u32 {} + impl TypeId for Id { const TYPE_ID: [u8; 4] = *b"para"; } diff --git a/polkadot/parachain/test-parachains/adder/collator/tests/integration.rs b/polkadot/parachain/test-parachains/adder/collator/tests/integration.rs index 85abf8bf36b9..5d728517c4bb 100644 --- a/polkadot/parachain/test-parachains/adder/collator/tests/integration.rs +++ b/polkadot/parachain/test-parachains/adder/collator/tests/integration.rs @@ -23,7 +23,7 @@ #[tokio::test(flavor = "multi_thread")] async fn collating_using_adder_collator() { use polkadot_primitives::Id as ParaId; - use sp_keyring::AccountKeyring::*; + use sp_keyring::Sr25519Keyring::*; let mut builder = sc_cli::LoggerBuilder::new(""); builder.with_colors(false); diff --git a/polkadot/parachain/test-parachains/undying/collator/tests/integration.rs b/polkadot/parachain/test-parachains/undying/collator/tests/integration.rs index 8be535b9bb4c..b8e32b13bc9c 100644 --- a/polkadot/parachain/test-parachains/undying/collator/tests/integration.rs +++ b/polkadot/parachain/test-parachains/undying/collator/tests/integration.rs @@ -22,7 +22,7 @@ #[tokio::test(flavor = "multi_thread")] async fn collating_using_undying_collator() { use polkadot_primitives::Id as ParaId; - use sp_keyring::AccountKeyring::*; + use sp_keyring::Sr25519Keyring::*; let mut builder = sc_cli::LoggerBuilder::new(""); builder.with_colors(false); diff --git a/polkadot/primitives/Cargo.toml b/polkadot/primitives/Cargo.toml index dd269caa2d60..150aaf153fa7 100644 --- a/polkadot/primitives/Cargo.toml +++ b/polkadot/primitives/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Shared primitives used by Polkadot runtime" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/roadmap/implementers-guide/src/node/collators/collator-protocol.md 
b/polkadot/roadmap/implementers-guide/src/node/collators/collator-protocol.md index 432d9ab69bab..586a4169b5bc 100644 --- a/polkadot/roadmap/implementers-guide/src/node/collators/collator-protocol.md +++ b/polkadot/roadmap/implementers-guide/src/node/collators/collator-protocol.md @@ -151,12 +151,6 @@ time per relay parent. This reduces the bandwidth requirements and as we can sec the others are probably not required anyway. If the request times out, we need to note the collator as being unreliable and reduce its priority relative to other collators. -As a validator, once the collation has been fetched some other subsystem will inspect and do deeper validation of the -collation. The subsystem will report to this subsystem with a [`CollatorProtocolMessage`][CPM]`::ReportCollator`. In -that case, if we are connected directly to the collator, we apply a cost to the `PeerId` associated with the collator -and potentially disconnect or blacklist it. If the collation is seconded, we notify the collator and apply a benefit to -the `PeerId` associated with the collator. - ### Interaction with [Candidate Backing][CB] As collators advertise the availability, a validator will simply second the first valid parablock candidate per relay diff --git a/polkadot/roadmap/implementers-guide/src/node/subsystems-and-jobs.md b/polkadot/roadmap/implementers-guide/src/node/subsystems-and-jobs.md index a3ca7347eb63..a96f3fa3d4a0 100644 --- a/polkadot/roadmap/implementers-guide/src/node/subsystems-and-jobs.md +++ b/polkadot/roadmap/implementers-guide/src/node/subsystems-and-jobs.md @@ -129,7 +129,6 @@ digraph { cand_sel -> coll_prot [arrowhead = "diamond", label = "FetchCollation"] cand_sel -> cand_back [arrowhead = "onormal", label = "Second"] - cand_sel -> coll_prot [arrowhead = "onormal", label = "ReportCollator"] cand_val -> runt_api [arrowhead = "diamond", label = "Request::PersistedValidationData"] cand_val -> runt_api [arrowhead = "diamond", label = "Request::ValidationCode"] @@ -231,7 +230,7 @@ sequenceDiagram VS ->> CandidateSelection: Collation - Note over CandidateSelection: Lots of other machinery in play here,
but there are only three outcomes from the<br/>perspective of the `CollatorProtocol`:
+    Note over CandidateSelection: Lots of other machinery in play here,<br/>but there are only two outcomes from the<br/>
perspective of the `CollatorProtocol`: alt happy path CandidateSelection -->> VS: FetchCollation @@ -242,10 +241,6 @@ sequenceDiagram NB ->> VS: Collation Deactivate VS - else collation invalid or unexpected - CandidateSelection ->> VS: ReportCollator - VS ->> NB: ReportPeer - else CandidateSelection already selected a different candidate Note over CandidateSelection: silently drop end diff --git a/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md b/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md index 85415e42a11c..cb862440727b 100644 --- a/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md +++ b/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md @@ -436,9 +436,6 @@ enum CollatorProtocolMessage { DistributeCollation(CandidateReceipt, PoV, Option>), /// Fetch a collation under the given relay-parent for the given ParaId. FetchCollation(Hash, ParaId, ResponseChannel<(CandidateReceipt, PoV)>), - /// Report a collator as having provided an invalid collation. This should lead to disconnect - /// and blacklist of the collator. - ReportCollator(CollatorId), /// Note a collator as having provided a good collation. NoteGoodCollation(CollatorId, SignedFullStatement), /// Notify a collator that its collation was seconded. diff --git a/polkadot/rpc/Cargo.toml b/polkadot/rpc/Cargo.toml index d01528d4dee0..48980dde4bbc 100644 --- a/polkadot/rpc/Cargo.toml +++ b/polkadot/rpc/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Polkadot specific RPC functionality." +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/runtime/common/Cargo.toml b/polkadot/runtime/common/Cargo.toml index 01b56b31cf20..4b307b56bcbe 100644 --- a/polkadot/runtime/common/Cargo.toml +++ b/polkadot/runtime/common/Cargo.toml @@ -5,6 +5,8 @@ description = "Pallets and constants used in Relay Chain networks." authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -140,6 +142,7 @@ runtime-benchmarks = [ "sp-staking/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ "frame-election-provider-support/try-runtime", diff --git a/polkadot/runtime/common/slot_range_helper/Cargo.toml b/polkadot/runtime/common/slot_range_helper/Cargo.toml index 02810b75283f..3f110bdd76c6 100644 --- a/polkadot/runtime/common/slot_range_helper/Cargo.toml +++ b/polkadot/runtime/common/slot_range_helper/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Helper crate for generating slot ranges for the Polkadot runtime." 
+homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/runtime/common/src/auctions.rs b/polkadot/runtime/common/src/auctions.rs index 78f20d918bab..3ac1ba2dc4e0 100644 --- a/polkadot/runtime/common/src/auctions.rs +++ b/polkadot/runtime/common/src/auctions.rs @@ -339,10 +339,10 @@ impl Auctioneer> for Pallet { let sample_length = T::SampleLength::get().max(One::one()); let sample = after_early_end / sample_length; let sub_sample = after_early_end % sample_length; - return AuctionStatus::EndingPeriod(sample, sub_sample) + return AuctionStatus::EndingPeriod(sample, sub_sample); } else { // This is safe because of the comparison operator above - return AuctionStatus::VrfDelay(after_early_end - ending_period) + return AuctionStatus::VrfDelay(after_early_end - ending_period); } } @@ -559,7 +559,7 @@ impl Pallet { #[allow(deprecated)] Winning::::remove_all(None); AuctionInfo::::kill(); - return Some((res, lease_period_index)) + return Some((res, lease_period_index)); } } } @@ -765,11 +765,11 @@ mod tests { let (current_lease_period, _) = Self::lease_period_index(now).ok_or(LeaseError::NoLeasePeriod)?; if period_begin < current_lease_period { - return Err(LeaseError::AlreadyEnded) + return Err(LeaseError::AlreadyEnded); } for period in period_begin..(period_begin + period_count) { if leases.contains_key(&(para, period)) { - return Err(LeaseError::AlreadyLeased) + return Err(LeaseError::AlreadyLeased); } leases.insert((para, period), LeaseData { leaser: *leaser, amount }); } @@ -1718,7 +1718,7 @@ mod benchmarking { use polkadot_runtime_parachains::paras; use sp_runtime::{traits::Bounded, SaturatedConversion}; - use frame_benchmarking::{account, benchmarks, whitelisted_caller, BenchmarkError}; + use frame_benchmarking::v2::*; fn assert_last_event(generic_event: ::RuntimeEvent) { let events = frame_system::Pallet::::events(); @@ -1772,25 +1772,37 @@ mod benchmarking { } } - benchmarks! { - where_clause { where T: pallet_babe::Config + paras::Config } + #[benchmarks( + where T: pallet_babe::Config + paras::Config, + )] + mod benchmarks { + use super::*; - new_auction { + #[benchmark] + fn new_auction() -> Result<(), BenchmarkError> { let duration = BlockNumberFor::::max_value(); let lease_period_index = LeasePeriodOf::::max_value(); - let origin = - T::InitiateOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - }: _(origin, duration, lease_period_index) - verify { - assert_last_event::(Event::::AuctionStarted { - auction_index: AuctionCounter::::get(), - lease_period: LeasePeriodOf::::max_value(), - ending: BlockNumberFor::::max_value(), - }.into()); + let origin = T::InitiateOrigin::try_successful_origin() + .map_err(|_| BenchmarkError::Weightless)?; + + #[extrinsic_call] + _(origin as T::RuntimeOrigin, duration, lease_period_index); + + assert_last_event::( + Event::::AuctionStarted { + auction_index: AuctionCounter::::get(), + lease_period: LeasePeriodOf::::max_value(), + ending: BlockNumberFor::::max_value(), + } + .into(), + ); + + Ok(()) } // Worst case scenario a new bid comes in which kicks out an existing bid for the same slot. - bid { + #[benchmark] + fn bid() -> Result<(), BenchmarkError> { // If there is an offset, we need to be on that block to be able to do lease things. 
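(Editorial note, an inference from the mock `Leaser` shown above rather than from this diff: `lease_period_length()` returns the pair `(length, offset)`, and `lease_period_index(now)` yields `None` for blocks before `offset`; jumping to `offset + One::one()` therefore lands the benchmark on the first block where lease operations can succeed.)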
let (_, offset) = T::Leaser::lease_period_length(); frame_system::Pallet::::set_block_number(offset + One::one()); @@ -1810,8 +1822,18 @@ mod benchmarking { CurrencyOf::::make_free_balance_be(&owner, BalanceOf::::max_value()); let worst_head_data = T::Registrar::worst_head_data(); let worst_validation_code = T::Registrar::worst_validation_code(); - T::Registrar::register(owner.clone(), para, worst_head_data.clone(), worst_validation_code.clone())?; - T::Registrar::register(owner, new_para, worst_head_data, worst_validation_code.clone())?; + T::Registrar::register( + owner.clone(), + para, + worst_head_data.clone(), + worst_validation_code.clone(), + )?; + T::Registrar::register( + owner, + new_para, + worst_head_data, + worst_validation_code.clone(), + )?; assert_ok!(paras::Pallet::::add_trusted_validation_code( frame_system::Origin::::Root.into(), worst_validation_code, @@ -1839,15 +1861,28 @@ mod benchmarking { CurrencyOf::::make_free_balance_be(&caller, BalanceOf::::max_value()); let bigger_amount = CurrencyOf::::minimum_balance().saturating_mul(10u32.into()); assert_eq!(CurrencyOf::::reserved_balance(&first_bidder), first_amount); - }: _(RawOrigin::Signed(caller.clone()), new_para, auction_index, first_slot, last_slot, bigger_amount) - verify { - // Confirms that we unreserved funds from a previous bidder, which is worst case scenario. + + #[extrinsic_call] + _( + RawOrigin::Signed(caller.clone()), + new_para, + auction_index, + first_slot, + last_slot, + bigger_amount, + ); + + // Confirms that we unreserved funds from a previous bidder, which is worst case + // scenario. assert_eq!(CurrencyOf::::reserved_balance(&caller), bigger_amount); + + Ok(()) } - // Worst case: 10 bidders taking all wining spots, and we need to calculate the winner for auction end. - // Entire winner map should be full and removed at the end of the benchmark. - on_initialize { + // Worst case: 10 bidders taking all wining spots, and we need to calculate the winner for + // auction end. Entire winner map should be full and removed at the end of the benchmark. + #[benchmark] + fn on_initialize() -> Result<(), BenchmarkError> { // If there is an offset, we need to be on that block to be able to do lease things. let (lease_length, offset) = T::Leaser::lease_period_length(); frame_system::Pallet::::set_block_number(offset + One::one()); @@ -1868,7 +1903,7 @@ mod benchmarking { let winning_data = Winning::::get(BlockNumberFor::::from(0u32)).unwrap(); // Make winning map full - for i in 0u32 .. (T::EndingPeriod::get() / T::SampleLength::get()).saturated_into() { + for i in 0u32..(T::EndingPeriod::get() / T::SampleLength::get()).saturated_into() { Winning::::insert(BlockNumberFor::::from(i), winning_data.clone()); } @@ -1882,20 +1917,29 @@ mod benchmarking { let authorities = pallet_babe::Pallet::::authorities(); // Check for non empty authority set since it otherwise emits a No-OP warning. 
            if !authorities.is_empty() {
-                pallet_babe::Pallet::<T>::enact_epoch_change(authorities.clone(), authorities, None);
+                pallet_babe::Pallet::<T>::enact_epoch_change(
+                    authorities.clone(),
+                    authorities,
+                    None,
+                );
            }
        }
-    }: {
-        Auctions::<T>::on_initialize(duration + now + T::EndingPeriod::get());
-    } verify {
+        #[block]
+        {
+            Auctions::<T>::on_initialize(duration + now + T::EndingPeriod::get());
+        }
+
        let auction_index = AuctionCounter::<T>::get();
        assert_last_event::<T>(Event::<T>::AuctionClosed { auction_index }.into());
        assert!(Winning::<T>::iter().count().is_zero());
+
+        Ok(())
    }

    // Worst case: 10 bidders taking all winning spots, and winning data is full.
-    cancel_auction {
+    #[benchmark]
+    fn cancel_auction() -> Result<(), BenchmarkError> {
        // If there is an offset, we need to be on that block to be able to do lease things.
        let (lease_length, offset) = T::Leaser::lease_period_length();
        frame_system::Pallet::<T>::set_block_number(offset + One::one());
@@ -1903,7 +1947,6 @@
        // Create a new auction
        let duration: BlockNumberFor<T> = lease_length / 2u32.into();
        let lease_period_index = LeasePeriodOf::<T>::zero();
-        let now = frame_system::Pallet::<T>::block_number();
        let origin = T::InitiateOrigin::try_successful_origin()
            .expect("InitiateOrigin has no successful origin required for the benchmark");
        Auctions::<T>::new_auction(origin, duration, lease_period_index)?;
@@ -1916,13 +1959,16 @@
        }

        // Make winning map full
-        for i in 0u32 .. (T::EndingPeriod::get() / T::SampleLength::get()).saturated_into() {
+        for i in 0u32..(T::EndingPeriod::get() / T::SampleLength::get()).saturated_into() {
            Winning::<T>::insert(BlockNumberFor::<T>::from(i), winning_data.clone());
        }
        assert!(AuctionInfo::<T>::get().is_some());
-    }: _(RawOrigin::Root)
-    verify {
+
+        #[extrinsic_call]
+        _(RawOrigin::Root);
+
        assert!(AuctionInfo::<T>::get().is_none());
+        Ok(())
    }

    impl_benchmark_test_suite!(
diff --git a/polkadot/runtime/common/src/identity_migrator.rs b/polkadot/runtime/common/src/identity_migrator.rs
index 126c886280e6..e3835b692526 100644
--- a/polkadot/runtime/common/src/identity_migrator.rs
+++ b/polkadot/runtime/common/src/identity_migrator.rs
@@ -160,12 +160,22 @@ pub trait OnReapIdentity<AccountId> {
    /// - `bytes`: The byte size of `IdentityInfo`.
    /// - `subs`: The number of sub-accounts they had.
    fn on_reap_identity(who: &AccountId, bytes: u32, subs: u32) -> DispatchResult;
+
+    /// Ensure that identity reaping will be successful in benchmarking.
+    ///
+    /// Should set up the state in a way that the same call to `[Self::on_reap_identity]` will be
+    /// successful.
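For reference, the concrete Rococo implementation of this hook appears later in this diff; it simply makes the People Chain (para 1004) routable for DMP:

    #[cfg(feature = "runtime-benchmarks")]
    fn ensure_successful_identity_reaping(_: &AccountId, _: u32, _: u32) {
        crate::Dmp::make_parachain_reachable(1004);
    }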
+ #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful_identity_reaping(who: &AccountId, bytes: u32, subs: u32); } impl OnReapIdentity for () { fn on_reap_identity(_who: &AccountId, _bytes: u32, _subs: u32) -> DispatchResult { Ok(()) } + + #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful_identity_reaping(_: &AccountId, _: u32, _: u32) {} } #[cfg(feature = "runtime-benchmarks")] @@ -219,6 +229,12 @@ mod benchmarks { } Identity::::set_subs(target_origin.clone(), subs.clone())?; + T::ReapIdentityHandler::ensure_successful_identity_reaping( + &target, + info.encoded_size() as u32, + subs.len() as u32, + ); + // add registrars and provide judgements let registrar_origin = T::RegistrarOrigin::try_successful_origin() .expect("RegistrarOrigin has no successful origin required for the benchmark"); diff --git a/polkadot/runtime/common/src/paras_sudo_wrapper.rs b/polkadot/runtime/common/src/paras_sudo_wrapper.rs index af93c70b4783..bd5984b3b63e 100644 --- a/polkadot/runtime/common/src/paras_sudo_wrapper.rs +++ b/polkadot/runtime/common/src/paras_sudo_wrapper.rs @@ -24,7 +24,7 @@ pub use pallet::*; use polkadot_primitives::Id as ParaId; use polkadot_runtime_parachains::{ configuration, dmp, hrmp, - paras::{self, AssignCoretime, ParaGenesisArgs}, + paras::{self, AssignCoretime, ParaGenesisArgs, ParaKind}, ParaLifecycle, }; @@ -48,6 +48,8 @@ pub mod pallet { /// A DMP message couldn't be sent because it exceeds the maximum size allowed for a /// downward message. ExceedsMaxMessageSize, + /// A DMP message couldn't be sent because the destination is unreachable. + Unroutable, /// Could not schedule para cleanup. CouldntCleanup, /// Not a parathread (on-demand parachain). @@ -80,10 +82,15 @@ pub mod pallet { genesis: ParaGenesisArgs, ) -> DispatchResult { ensure_root(origin)?; + + let assign_coretime = genesis.para_kind == ParaKind::Parachain; + polkadot_runtime_parachains::schedule_para_initialize::(id, genesis) .map_err(|_| Error::::ParaAlreadyExists)?; - T::AssignCoretime::assign_coretime(id)?; + if assign_coretime { + T::AssignCoretime::assign_coretime(id)?; + } Ok(()) } @@ -152,6 +159,7 @@ pub mod pallet { { dmp::QueueDownwardMessageError::ExceedsMaxMessageSize => Error::::ExceedsMaxMessageSize.into(), + dmp::QueueDownwardMessageError::Unroutable => Error::::Unroutable.into(), }) } diff --git a/polkadot/runtime/common/src/xcm_sender.rs b/polkadot/runtime/common/src/xcm_sender.rs index 7ff7f69faf14..32ea4fdd2f27 100644 --- a/polkadot/runtime/common/src/xcm_sender.rs +++ b/polkadot/runtime/common/src/xcm_sender.rs @@ -138,6 +138,13 @@ where .map(|()| hash) .map_err(|_| SendError::Transport(&"Error placing into DMP queue")) } + + #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful_delivery(location: Option) { + if let Some((0, [Parachain(id)])) = location.as_ref().map(|l| l.unpack()) { + dmp::Pallet::::make_parachain_reachable(*id); + } + } } impl InspectMessageQueues for ChildParachainRouter { @@ -190,7 +197,7 @@ impl< ExistentialDeposit: Get>, PriceForDelivery: PriceForMessageDelivery, Parachain: Get, - ToParachainHelper: EnsureForParachain, + ToParachainHelper: polkadot_runtime_parachains::EnsureForParachain, > xcm_builder::EnsureDelivery for ToParachainDeliveryHelper< XcmConfig, @@ -219,6 +226,9 @@ impl< return (None, None) } + // allow more initialization for target parachain + ToParachainHelper::ensure(Parachain::get()); + let mut fees_mode = None; if !XcmConfig::FeeManager::is_waived(Some(origin_ref), fee_reason) { // if not waived, we need to set up accounts for 
paying and receiving fees @@ -238,9 +248,6 @@ impl< XcmConfig::AssetTransactor::deposit_asset(&fee, &origin_ref, None).unwrap(); } - // allow more initialization for target parachain - ToParachainHelper::ensure(Parachain::get()); - // expected worst case - direct withdraw fees_mode = Some(FeesMode { jit_withdraw: true }); } @@ -248,18 +255,6 @@ impl< } } -/// Ensure more initialization for `ParaId`. (e.g. open HRMP channels, ...) -#[cfg(feature = "runtime-benchmarks")] -pub trait EnsureForParachain { - fn ensure(para_id: ParaId); -} -#[cfg(feature = "runtime-benchmarks")] -impl EnsureForParachain for () { - fn ensure(_: ParaId) { - // doing nothing - } -} - #[cfg(test)] mod tests { use super::*; @@ -349,6 +344,8 @@ mod tests { c.max_downward_message_size = u32::MAX; }); + dmp::Pallet::::make_parachain_reachable(5555); + // Check that the good message is validated: assert_ok!(::validate( &mut Some(dest.into()), diff --git a/polkadot/runtime/metrics/Cargo.toml b/polkadot/runtime/metrics/Cargo.toml index 3709e1eb697e..0415e4754009 100644 --- a/polkadot/runtime/metrics/Cargo.toml +++ b/polkadot/runtime/metrics/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Runtime metric interface for the Polkadot node" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/runtime/parachains/Cargo.toml b/polkadot/runtime/parachains/Cargo.toml index a3eec3f9d961..b583e9c6cc50 100644 --- a/polkadot/runtime/parachains/Cargo.toml +++ b/polkadot/runtime/parachains/Cargo.toml @@ -5,6 +5,8 @@ description = "Relay Chain runtime code responsible for Parachains." authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -138,6 +140,7 @@ runtime-benchmarks = [ "sp-std", "static_assertions", "xcm-executor/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ "frame-support-test/try-runtime", diff --git a/polkadot/runtime/parachains/src/coretime/benchmarking.rs b/polkadot/runtime/parachains/src/coretime/benchmarking.rs index 6d593f1954ff..49e3d8a88c01 100644 --- a/polkadot/runtime/parachains/src/coretime/benchmarking.rs +++ b/polkadot/runtime/parachains/src/coretime/benchmarking.rs @@ -43,6 +43,8 @@ mod benchmarks { .unwrap(); on_demand::Revenue::::put(rev); + crate::paras::Heads::::insert(ParaId::from(T::BrokerId::get()), vec![1, 2, 3]); + ::Currency::make_free_balance_be( &>::account_id(), minimum_balance * (mhr * (mhr + 1)).into(), diff --git a/polkadot/runtime/parachains/src/coretime/mod.rs b/polkadot/runtime/parachains/src/coretime/mod.rs index 966b7997a277..5656e92b90be 100644 --- a/polkadot/runtime/parachains/src/coretime/mod.rs +++ b/polkadot/runtime/parachains/src/coretime/mod.rs @@ -136,6 +136,11 @@ pub mod pallet { type AssetTransactor: TransactAsset; /// AccountId to Location converter type AccountToLocation: for<'a> TryConvert<&'a Self::AccountId, Location>; + + /// Maximum weight for any XCM transact call that should be executed on the coretime chain. + /// + /// Basically should be `max_weight(set_leases, reserve, notify_core_count)`. 
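(Editorial cross-reference: concrete values are set later in this diff — the parachains mock uses `Weight::from_parts(10_000_000, 10_000)` and Rococo uses `Weight::from_parts(200_000_000, 20_000)` — wired up roughly as:

    parameter_types! {
        pub MaxXcmTransactWeight: Weight = Weight::from_parts(200_000_000, 20_000);
    }
    // in `impl coretime::Config for Runtime`:
    // type MaxXcmTransactWeight = MaxXcmTransactWeight;

and consumed by `mk_coretime_call` as the `fallback_max_weight` of the XCM `Transact`.)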
+ type MaxXcmTransactWeight: Get; } #[pallet::event] @@ -333,6 +338,7 @@ impl OnNewSession> for Pallet { fn mk_coretime_call(call: crate::coretime::CoretimeCalls) -> Instruction<()> { Instruction::Transact { origin_kind: OriginKind::Superuser, + fallback_max_weight: Some(T::MaxXcmTransactWeight::get()), call: BrokerRuntimePallets::Broker(call).encode().into(), } } diff --git a/polkadot/runtime/parachains/src/dmp.rs b/polkadot/runtime/parachains/src/dmp.rs index 03580e11b8e9..3c9cf8004186 100644 --- a/polkadot/runtime/parachains/src/dmp.rs +++ b/polkadot/runtime/parachains/src/dmp.rs @@ -44,7 +44,7 @@ use crate::{ configuration::{self, HostConfiguration}, - initializer, FeeTracker, + initializer, paras, FeeTracker, }; use alloc::vec::Vec; use core::fmt; @@ -72,12 +72,15 @@ const MESSAGE_SIZE_FEE_BASE: FixedU128 = FixedU128::from_rational(1, 1000); // 0 pub enum QueueDownwardMessageError { /// The message being sent exceeds the configured max message size. ExceedsMaxMessageSize, + /// The destination is unknown. + Unroutable, } impl From for SendError { fn from(err: QueueDownwardMessageError) -> Self { match err { QueueDownwardMessageError::ExceedsMaxMessageSize => SendError::ExceedsMaxMessageSize, + QueueDownwardMessageError::Unroutable => SendError::Unroutable, } } } @@ -116,7 +119,7 @@ pub mod pallet { pub struct Pallet(_); #[pallet::config] - pub trait Config: frame_system::Config + configuration::Config {} + pub trait Config: frame_system::Config + configuration::Config + paras::Config {} /// The downward messages addressed for a certain para. #[pallet::storage] @@ -200,6 +203,11 @@ impl Pallet { return Err(QueueDownwardMessageError::ExceedsMaxMessageSize) } + // If the head exists, we assume the parachain is legit and exists. + if !paras::Heads::::contains_key(para) { + return Err(QueueDownwardMessageError::Unroutable) + } + Ok(()) } @@ -217,14 +225,7 @@ impl Pallet { msg: DownwardMessage, ) -> Result<(), QueueDownwardMessageError> { let serialized_len = msg.len() as u32; - if serialized_len > config.max_downward_message_size { - return Err(QueueDownwardMessageError::ExceedsMaxMessageSize) - } - - // Hard limit on Queue size - if Self::dmq_length(para) > Self::dmq_max_length(config.max_downward_message_size) { - return Err(QueueDownwardMessageError::ExceedsMaxMessageSize) - } + Self::can_queue_downward_message(config, ¶, &msg)?; let inbound = InboundDownwardMessage { msg, sent_at: frame_system::Pallet::::block_number() }; @@ -336,6 +337,15 @@ impl Pallet { ) -> Vec>> { DownwardMessageQueues::::get(&recipient) } + + /// Make the parachain reachable for downward messages. + /// + /// Only useable in benchmarks or tests. 
+ #[cfg(any(feature = "runtime-benchmarks", feature = "std"))] + pub fn make_parachain_reachable(para: impl Into) { + let para = para.into(); + crate::paras::Heads::::insert(para, para.encode()); + } } impl FeeTracker for Pallet { @@ -359,3 +369,10 @@ impl FeeTracker for Pallet { }) } } + +#[cfg(feature = "runtime-benchmarks")] +impl crate::EnsureForParachain for Pallet { + fn ensure(para: ParaId) { + Self::make_parachain_reachable(para); + } +} diff --git a/polkadot/runtime/parachains/src/dmp/tests.rs b/polkadot/runtime/parachains/src/dmp/tests.rs index de1515958125..617c9488bd2a 100644 --- a/polkadot/runtime/parachains/src/dmp/tests.rs +++ b/polkadot/runtime/parachains/src/dmp/tests.rs @@ -61,6 +61,12 @@ fn queue_downward_message( Dmp::queue_downward_message(&configuration::ActiveConfig::::get(), para_id, msg) } +fn register_paras(paras: &[ParaId]) { + paras.iter().for_each(|p| { + Dmp::make_parachain_reachable(*p); + }); +} + #[test] fn clean_dmp_works() { let a = ParaId::from(1312); @@ -68,6 +74,8 @@ fn clean_dmp_works() { let c = ParaId::from(123); new_test_ext(default_genesis_config()).execute_with(|| { + register_paras(&[a, b, c]); + // enqueue downward messages to A, B and C. queue_downward_message(a, vec![1, 2, 3]).unwrap(); queue_downward_message(b, vec![4, 5, 6]).unwrap(); @@ -89,6 +97,8 @@ fn dmq_length_and_head_updated_properly() { let b = ParaId::from(228); new_test_ext(default_genesis_config()).execute_with(|| { + register_paras(&[a, b]); + assert_eq!(Dmp::dmq_length(a), 0); assert_eq!(Dmp::dmq_length(b), 0); @@ -101,11 +111,30 @@ fn dmq_length_and_head_updated_properly() { }); } +#[test] +fn dmq_fail_if_para_does_not_exist() { + let a = ParaId::from(1312); + + new_test_ext(default_genesis_config()).execute_with(|| { + assert_eq!(Dmp::dmq_length(a), 0); + + assert!(matches!( + queue_downward_message(a, vec![1, 2, 3]), + Err(QueueDownwardMessageError::Unroutable) + )); + + assert_eq!(Dmp::dmq_length(a), 0); + assert!(Dmp::dmq_mqc_head(a).is_zero()); + }); +} + #[test] fn dmp_mqc_head_fixture() { let a = ParaId::from(2000); new_test_ext(default_genesis_config()).execute_with(|| { + register_paras(&[a]); + run_to_block(2, None); assert!(Dmp::dmq_mqc_head(a).is_zero()); queue_downward_message(a, vec![1, 2, 3]).unwrap(); @@ -125,6 +154,8 @@ fn check_processed_downward_messages() { let a = ParaId::from(1312); new_test_ext(default_genesis_config()).execute_with(|| { + register_paras(&[a]); + let block_number = System::block_number(); // processed_downward_messages=0 is allowed when the DMQ is empty. 
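(Editorial usage sketch, following the `register_paras` helper introduced above: with the new `Unroutable` check a test must record a head for the destination para before queueing, e.g.

    Dmp::make_parachain_reachable(ParaId::from(2000));
    assert!(queue_downward_message(ParaId::from(2000), vec![1, 2, 3]).is_ok());

otherwise `queue_downward_message` now fails with `QueueDownwardMessageError::Unroutable`, which is exactly what the new `dmq_fail_if_para_does_not_exist` test asserts.)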
@@ -150,6 +181,8 @@ fn check_processed_downward_messages_advancement_rule() { let a = ParaId::from(1312); new_test_ext(default_genesis_config()).execute_with(|| { + register_paras(&[a]); + let block_number = System::block_number(); run_to_block(block_number + 1, None); @@ -170,6 +203,8 @@ fn dmq_pruning() { let a = ParaId::from(1312); new_test_ext(default_genesis_config()).execute_with(|| { + register_paras(&[a]); + assert_eq!(Dmp::dmq_length(a), 0); queue_downward_message(a, vec![1, 2, 3]).unwrap(); @@ -194,6 +229,8 @@ fn queue_downward_message_critical() { genesis.configuration.config.max_downward_message_size = 7; new_test_ext(genesis).execute_with(|| { + register_paras(&[a]); + let smol = [0; 3].to_vec(); let big = [0; 8].to_vec(); @@ -215,6 +252,8 @@ fn verify_dmq_mqc_head_is_externally_accessible() { let a = ParaId::from(2020); new_test_ext(default_genesis_config()).execute_with(|| { + register_paras(&[a]); + let head = sp_io::storage::get(&well_known_keys::dmq_mqc_head(a)); assert_eq!(head, None); @@ -235,9 +274,12 @@ fn verify_dmq_mqc_head_is_externally_accessible() { #[test] fn verify_fee_increase_and_decrease() { let a = ParaId::from(123); + let mut genesis = default_genesis_config(); genesis.configuration.config.max_downward_message_size = 16777216; new_test_ext(genesis).execute_with(|| { + register_paras(&[a]); + let initial = InitialFactor::get(); assert_eq!(DeliveryFeeFactor::::get(a), initial); @@ -287,6 +329,8 @@ fn verify_fee_factor_reaches_high_value() { let mut genesis = default_genesis_config(); genesis.configuration.config.max_downward_message_size = 51200; new_test_ext(genesis).execute_with(|| { + register_paras(&[a]); + let max_messages = Dmp::dmq_max_length(ActiveConfig::::get().max_downward_message_size); let mut total_fee_factor = FixedU128::from_float(1.0); diff --git a/polkadot/runtime/parachains/src/lib.rs b/polkadot/runtime/parachains/src/lib.rs index 828c0b9bcef2..b1ff5419470e 100644 --- a/polkadot/runtime/parachains/src/lib.rs +++ b/polkadot/runtime/parachains/src/lib.rs @@ -114,3 +114,19 @@ pub fn schedule_code_upgrade( pub fn set_current_head(id: ParaId, new_head: HeadData) { paras::Pallet::::set_current_head(id, new_head) } + +/// Ensure more initialization for `ParaId` when benchmarking. (e.g. open HRMP channels, ...) +#[cfg(feature = "runtime-benchmarks")] +pub trait EnsureForParachain { + fn ensure(para_id: ParaId); +} + +#[cfg(feature = "runtime-benchmarks")] +#[impl_trait_for_tuples::impl_for_tuples(30)] +impl EnsureForParachain for Tuple { + fn ensure(para: ParaId) { + for_tuples!( #( + Tuple::ensure(para); + )* ); + } +} diff --git a/polkadot/runtime/parachains/src/mock.rs b/polkadot/runtime/parachains/src/mock.rs index d701e1f9bd80..ee1990a7b618 100644 --- a/polkadot/runtime/parachains/src/mock.rs +++ b/polkadot/runtime/parachains/src/mock.rs @@ -421,6 +421,7 @@ impl assigner_coretime::Config for Test {} parameter_types! 
{ pub const BrokerId: u32 = 10u32; + pub MaxXcmTransactWeight: Weight = Weight::from_parts(10_000_000, 10_000); } pub struct BrokerPot; @@ -437,6 +438,7 @@ impl coretime::Config for Test { type BrokerId = BrokerId; type WeightInfo = crate::coretime::TestWeightInfo; type SendXcm = DummyXcmSender; + type MaxXcmTransactWeight = MaxXcmTransactWeight; type BrokerPotLocation = BrokerPot; type AssetTransactor = (); type AccountToLocation = (); diff --git a/polkadot/runtime/parachains/src/paras/benchmarking.rs b/polkadot/runtime/parachains/src/paras/benchmarking.rs index 7bf8b833ed91..4d617cbb05bb 100644 --- a/polkadot/runtime/parachains/src/paras/benchmarking.rs +++ b/polkadot/runtime/parachains/src/paras/benchmarking.rs @@ -17,7 +17,7 @@ use super::*; use crate::configuration::HostConfiguration; use alloc::vec; -use frame_benchmarking::benchmarks; +use frame_benchmarking::v2::*; use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; use polkadot_primitives::{ HeadData, Id as ParaId, ValidationCode, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, @@ -84,41 +84,58 @@ fn generate_disordered_actions_queue() { }); } -benchmarks! { - force_set_current_code { - let c in MIN_CODE_SIZE .. MAX_CODE_SIZE; +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn force_set_current_code(c: Linear) { let new_code = ValidationCode(vec![0; c as usize]); let para_id = ParaId::from(c as u32); CurrentCodeHash::::insert(¶_id, new_code.hash()); generate_disordered_pruning::(); - }: _(RawOrigin::Root, para_id, new_code) - verify { + + #[extrinsic_call] + _(RawOrigin::Root, para_id, new_code); + assert_last_event::(Event::CurrentCodeUpdated(para_id).into()); } - force_set_current_head { - let s in MIN_CODE_SIZE .. MAX_HEAD_DATA_SIZE; + + #[benchmark] + fn force_set_current_head(s: Linear) { let new_head = HeadData(vec![0; s as usize]); let para_id = ParaId::from(1000); - }: _(RawOrigin::Root, para_id, new_head) - verify { + + #[extrinsic_call] + _(RawOrigin::Root, para_id, new_head); + assert_last_event::(Event::CurrentHeadUpdated(para_id).into()); } - force_set_most_recent_context { + + #[benchmark] + fn force_set_most_recent_context() { let para_id = ParaId::from(1000); let context = BlockNumberFor::::from(1000u32); - }: _(RawOrigin::Root, para_id, context) - force_schedule_code_upgrade { - let c in MIN_CODE_SIZE .. MAX_CODE_SIZE; + + #[extrinsic_call] + _(RawOrigin::Root, para_id, context); + } + + #[benchmark] + fn force_schedule_code_upgrade(c: Linear) { let new_code = ValidationCode(vec![0; c as usize]); let para_id = ParaId::from(c as u32); let block = BlockNumberFor::::from(c); generate_disordered_upgrades::(); - }: _(RawOrigin::Root, para_id, new_code, block) - verify { + + #[extrinsic_call] + _(RawOrigin::Root, para_id, new_code, block); + assert_last_event::(Event::CodeUpgradeScheduled(para_id).into()); } - force_note_new_head { - let s in MIN_CODE_SIZE .. MAX_HEAD_DATA_SIZE; + + #[benchmark] + fn force_note_new_head(s: Linear) { let para_id = ParaId::from(1000); let new_head = HeadData(vec![0; s as usize]); let old_code_hash = ValidationCode(vec![0]).hash(); @@ -135,70 +152,101 @@ benchmarks! 
{ &config, UpgradeStrategy::SetGoAheadSignal, ); - }: _(RawOrigin::Root, para_id, new_head) - verify { + + #[extrinsic_call] + _(RawOrigin::Root, para_id, new_head); + assert_last_event::(Event::NewHeadNoted(para_id).into()); } - force_queue_action { + + #[benchmark] + fn force_queue_action() { let para_id = ParaId::from(1000); generate_disordered_actions_queue::(); - }: _(RawOrigin::Root, para_id) - verify { - let next_session = crate::shared::CurrentSessionIndex::::get().saturating_add(One::one()); + + #[extrinsic_call] + _(RawOrigin::Root, para_id); + + let next_session = + crate::shared::CurrentSessionIndex::::get().saturating_add(One::one()); assert_last_event::(Event::ActionQueued(para_id, next_session).into()); } - add_trusted_validation_code { - let c in MIN_CODE_SIZE .. MAX_CODE_SIZE; + #[benchmark] + fn add_trusted_validation_code(c: Linear) { let new_code = ValidationCode(vec![0; c as usize]); pvf_check::prepare_bypassing_bench::(new_code.clone()); - }: _(RawOrigin::Root, new_code) - poke_unused_validation_code { + #[extrinsic_call] + _(RawOrigin::Root, new_code); + } + + #[benchmark] + fn poke_unused_validation_code() { let code_hash = [0; 32].into(); - }: _(RawOrigin::Root, code_hash) - include_pvf_check_statement { + #[extrinsic_call] + _(RawOrigin::Root, code_hash); + } + + #[benchmark] + fn include_pvf_check_statement() { let (stmt, signature) = pvf_check::prepare_inclusion_bench::(); - }: { - let _ = Pallet::::include_pvf_check_statement(RawOrigin::None.into(), stmt, signature); + + #[block] + { + let _ = + Pallet::::include_pvf_check_statement(RawOrigin::None.into(), stmt, signature); + } } - include_pvf_check_statement_finalize_upgrade_accept { - let (stmt, signature) = pvf_check::prepare_finalization_bench::( - VoteCause::Upgrade, - VoteOutcome::Accept, - ); - }: { - let _ = Pallet::::include_pvf_check_statement(RawOrigin::None.into(), stmt, signature); + #[benchmark] + fn include_pvf_check_statement_finalize_upgrade_accept() { + let (stmt, signature) = + pvf_check::prepare_finalization_bench::(VoteCause::Upgrade, VoteOutcome::Accept); + + #[block] + { + let _ = + Pallet::::include_pvf_check_statement(RawOrigin::None.into(), stmt, signature); + } } - include_pvf_check_statement_finalize_upgrade_reject { - let (stmt, signature) = pvf_check::prepare_finalization_bench::( - VoteCause::Upgrade, - VoteOutcome::Reject, - ); - }: { - let _ = Pallet::::include_pvf_check_statement(RawOrigin::None.into(), stmt, signature); + #[benchmark] + fn include_pvf_check_statement_finalize_upgrade_reject() { + let (stmt, signature) = + pvf_check::prepare_finalization_bench::(VoteCause::Upgrade, VoteOutcome::Reject); + + #[block] + { + let _ = + Pallet::::include_pvf_check_statement(RawOrigin::None.into(), stmt, signature); + } } - include_pvf_check_statement_finalize_onboarding_accept { - let (stmt, signature) = pvf_check::prepare_finalization_bench::( - VoteCause::Onboarding, - VoteOutcome::Accept, - ); - }: { - let _ = Pallet::::include_pvf_check_statement(RawOrigin::None.into(), stmt, signature); + #[benchmark] + fn include_pvf_check_statement_finalize_onboarding_accept() { + let (stmt, signature) = + pvf_check::prepare_finalization_bench::(VoteCause::Onboarding, VoteOutcome::Accept); + + #[block] + { + let _ = + Pallet::::include_pvf_check_statement(RawOrigin::None.into(), stmt, signature); + } } - include_pvf_check_statement_finalize_onboarding_reject { - let (stmt, signature) = pvf_check::prepare_finalization_bench::( - VoteCause::Onboarding, - VoteOutcome::Reject, - ); - }: { - let _ 
= Pallet::::include_pvf_check_statement(RawOrigin::None.into(), stmt, signature); + #[benchmark] + fn include_pvf_check_statement_finalize_onboarding_reject() { + let (stmt, signature) = + pvf_check::prepare_finalization_bench::(VoteCause::Onboarding, VoteOutcome::Reject); + + #[block] + { + let _ = + Pallet::::include_pvf_check_statement(RawOrigin::None.into(), stmt, signature); + } } impl_benchmark_test_suite!( diff --git a/polkadot/runtime/rococo/Cargo.toml b/polkadot/runtime/rococo/Cargo.toml index 3b11c977edf3..1fd32c5d0c32 100644 --- a/polkadot/runtime/rococo/Cargo.toml +++ b/polkadot/runtime/rococo/Cargo.toml @@ -6,6 +6,8 @@ description = "Rococo testnet Relay Chain runtime." authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -275,6 +277,7 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ "frame-executive/try-runtime", diff --git a/polkadot/runtime/rococo/constants/Cargo.toml b/polkadot/runtime/rococo/constants/Cargo.toml index 1d0adac44af4..921bc8f5fe92 100644 --- a/polkadot/runtime/rococo/constants/Cargo.toml +++ b/polkadot/runtime/rococo/constants/Cargo.toml @@ -5,6 +5,8 @@ description = "Constants used throughout the Rococo network." authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [package.metadata.polkadot-sdk] exclude-from-umbrella = true diff --git a/polkadot/runtime/rococo/src/impls.rs b/polkadot/runtime/rococo/src/impls.rs index ab796edc54b1..a5cb2eddfa0d 100644 --- a/polkadot/runtime/rococo/src/impls.rs +++ b/polkadot/runtime/rococo/src/impls.rs @@ -21,7 +21,7 @@ use core::marker::PhantomData; use frame_support::pallet_prelude::DispatchResult; use frame_system::RawOrigin; use polkadot_primitives::Balance; -use polkadot_runtime_common::identity_migrator::OnReapIdentity; +use polkadot_runtime_common::identity_migrator::{OnReapIdentity, WeightInfo}; use rococo_runtime_constants::currency::*; use xcm::{latest::prelude::*, VersionedLocation, VersionedXcm}; use xcm_executor::traits::TransactAsset; @@ -88,7 +88,10 @@ where AccountId: Into<[u8; 32]> + Clone + Encode, { fn on_reap_identity(who: &AccountId, fields: u32, subs: u32) -> DispatchResult { - use crate::impls::IdentityMigratorCalls::PokeDeposit; + use crate::{ + impls::IdentityMigratorCalls::PokeDeposit, + weights::polkadot_runtime_common_identity_migrator::WeightInfo as MigratorWeights, + }; let total_to_send = Self::calculate_remote_deposit(fields, subs); @@ -141,6 +144,7 @@ where .into(); let poke = PeopleRuntimePallets::::IdentityMigrator(PokeDeposit(who.clone())); + let remote_weight_limit = MigratorWeights::::poke_deposit().saturating_mul(2); // Actual program to execute on People Chain. let program: Xcm<()> = Xcm(vec![ @@ -157,7 +161,11 @@ where .into(), }, // Poke the deposit to reserve the appropriate amount on the parachain. 
- Transact { origin_kind: OriginKind::Superuser, call: poke.encode().into() }, + Transact { + origin_kind: OriginKind::Superuser, + fallback_max_weight: Some(remote_weight_limit), + call: poke.encode().into(), + }, ]); // send @@ -168,4 +176,9 @@ where )?; Ok(()) } + + #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful_identity_reaping(_: &AccountId, _: u32, _: u32) { + crate::Dmp::make_parachain_reachable(1004); + } } diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 96a97faa4750..2303e1212634 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -20,6 +20,17 @@ // `construct_runtime!` does a lot of recursion and requires us to increase the limit. #![recursion_limit = "512"] +#[cfg(all(any(target_arch = "riscv32", target_arch = "riscv64"), target_feature = "e"))] +// Allocate 2 MiB stack. +// +// TODO: A workaround. Invoke polkavm_derive::min_stack_size!() instead +// later on. +::core::arch::global_asm!( + ".pushsection .polkavm_min_stack_size,\"R\",@note\n", + ".4byte 2097152", + ".popsection\n", +); + extern crate alloc; use alloc::{ @@ -767,6 +778,7 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = MaxSignatories; type WeightInfo = weights::pallet_multisig::WeightInfo; + type BlockNumberProvider = frame_system::Pallet; } parameter_types! { @@ -971,6 +983,7 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; type AnnouncementDepositFactor = AnnouncementDepositFactor; + type BlockNumberProvider = frame_system::Pallet; } impl parachains_origin::Config for Runtime {} @@ -1098,6 +1111,7 @@ impl parachains_scheduler::Config for Runtime { parameter_types! { pub const BrokerId: u32 = BROKER_ID; pub const BrokerPalletId: PalletId = PalletId(*b"py/broke"); + pub MaxXcmTransactWeight: Weight = Weight::from_parts(200_000_000, 20_000); } pub struct BrokerPot; @@ -1121,6 +1135,7 @@ impl coretime::Config for Runtime { xcm_config::ThisNetwork, ::AccountId, >; + type MaxXcmTransactWeight = MaxXcmTransactWeight; } parameter_types! { @@ -2467,14 +2482,14 @@ sp_api::impl_runtime_apis! { ExistentialDepositAsset, xcm_config::PriceForChildParachainDelivery, AssetHubParaId, - (), + Dmp, >, polkadot_runtime_common::xcm_sender::ToParachainDeliveryHelper< XcmConfig, ExistentialDepositAsset, xcm_config::PriceForChildParachainDelivery, RandomParaId, - (), + Dmp, > ); @@ -2533,7 +2548,7 @@ sp_api::impl_runtime_apis! 
{ ExistentialDepositAsset, xcm_config::PriceForChildParachainDelivery, AssetHubParaId, - (), + Dmp, >; fn valid_destination() -> Result { Ok(AssetHub::get()) diff --git a/polkadot/runtime/rococo/src/tests.rs b/polkadot/runtime/rococo/src/tests.rs index 01eaad87e342..0b46caec5a35 100644 --- a/polkadot/runtime/rococo/src/tests.rs +++ b/polkadot/runtime/rococo/src/tests.rs @@ -22,7 +22,7 @@ use std::collections::HashSet; use crate::xcm_config::LocationConverter; use frame_support::traits::WhitelistedStorageKeys; use sp_core::{crypto::Ss58Codec, hexdisplay::HexDisplay}; -use sp_keyring::AccountKeyring::Alice; +use sp_keyring::Sr25519Keyring::Alice; use xcm_runtime_apis::conversions::LocationToAccountHelper; #[test] diff --git a/polkadot/runtime/rococo/src/weights/xcm/mod.rs b/polkadot/runtime/rococo/src/weights/xcm/mod.rs index a28b46800874..eb27e5c5a897 100644 --- a/polkadot/runtime/rococo/src/weights/xcm/mod.rs +++ b/polkadot/runtime/rococo/src/weights/xcm/mod.rs @@ -24,6 +24,7 @@ use xcm::{latest::prelude::*, DoubleEncoded}; use pallet_xcm_benchmarks_fungible::WeightInfo as XcmBalancesWeight; use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; +use sp_runtime::BoundedVec; use xcm::latest::AssetTransferFilter; /// Types of asset supported by the Rococo runtime. @@ -111,7 +112,11 @@ impl XcmWeightInfo for RococoXcmWeight { fn transfer_reserve_asset(assets: &Assets, _dest: &Location, _xcm: &Xcm<()>) -> Weight { assets.weigh_assets(XcmBalancesWeight::::transfer_reserve_asset()) } - fn transact(_origin_kind: &OriginKind, _call: &DoubleEncoded) -> Weight { + fn transact( + _origin_kind: &OriginKind, + _fallback_max_weight: &Option, + _call: &DoubleEncoded, + ) -> Weight { XcmGeneric::::transact() } fn hrmp_new_channel_open_request( @@ -286,8 +291,16 @@ impl XcmWeightInfo for RococoXcmWeight { fn unpaid_execution(_: &WeightLimit, _: &Option) -> Weight { XcmGeneric::::unpaid_execution() } - fn set_asset_claimer(_location: &Location) -> Weight { - XcmGeneric::::set_asset_claimer() + fn set_hints(hints: &BoundedVec) -> Weight { + let mut weight = Weight::zero(); + for hint in hints { + match hint { + AssetClaimer { .. } => { + weight = weight.saturating_add(XcmGeneric::::asset_claimer()); + }, + } + } + weight } fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { XcmGeneric::::execute_with_origin() diff --git a/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index e5915a7986bf..2dc8880c8326 100644 --- a/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -82,7 +82,7 @@ impl WeightInfo { // Minimum execution time: 2_899_000 picoseconds. 
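`SetAssetClaimer` is absorbed into the more general `SetHints` instruction here, which is why the weight function now folds a per-hint cost over a `BoundedVec`. A sketch of the equivalent message under the new encoding, assuming the XCMv5 `Hint` and `HintNumVariants` names:

use frame_support::BoundedVec;
use xcm::latest::prelude::*;

// One hint variant exists today (AssetClaimer); the bounded vector
// leaves room for future hints without another instruction.
let claimer = Location::new(0, [AccountId32 { id: [1u8; 32], network: None }]);
let hints: BoundedVec<Hint, HintNumVariants> =
    BoundedVec::truncate_from(vec![AssetClaimer { location: claimer }]);
let message: Xcm<()> = Xcm(vec![SetHints { hints }]);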
Weight::from_parts(3_090_000, 0) } - pub(crate) fn set_asset_claimer() -> Weight { + pub(crate) fn asset_claimer() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` diff --git a/polkadot/runtime/test-runtime/Cargo.toml b/polkadot/runtime/test-runtime/Cargo.toml index 90a0285cd17b..8b33bf9cebc6 100644 --- a/polkadot/runtime/test-runtime/Cargo.toml +++ b/polkadot/runtime/test-runtime/Cargo.toml @@ -154,4 +154,5 @@ runtime-benchmarks = [ "sp-staking/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm/runtime-benchmarks", ] diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index 69ce187dce40..d4031f7ac57a 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -584,6 +584,7 @@ impl parachains_paras::Config for Runtime { parameter_types! { pub const BrokerId: u32 = 10u32; + pub MaxXcmTransactWeight: Weight = Weight::from_parts(10_000_000, 10_000); } pub struct BrokerPot; @@ -657,6 +658,7 @@ impl coretime::Config for Runtime { type BrokerId = BrokerId; type WeightInfo = crate::coretime::TestWeightInfo; type SendXcm = DummyXcmSender; + type MaxXcmTransactWeight = MaxXcmTransactWeight; type BrokerPotLocation = BrokerPot; type AssetTransactor = (); type AccountToLocation = (); diff --git a/polkadot/runtime/westend/Cargo.toml b/polkadot/runtime/westend/Cargo.toml index f94301baab09..13e39b5aa317 100644 --- a/polkadot/runtime/westend/Cargo.toml +++ b/polkadot/runtime/westend/Cargo.toml @@ -6,6 +6,8 @@ description = "Westend testnet Relay Chain runtime." authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -296,6 +298,7 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ "frame-election-provider-support/try-runtime", diff --git a/polkadot/runtime/westend/constants/Cargo.toml b/polkadot/runtime/westend/constants/Cargo.toml index 27d5b19b8e77..a50e2f9cc639 100644 --- a/polkadot/runtime/westend/constants/Cargo.toml +++ b/polkadot/runtime/westend/constants/Cargo.toml @@ -5,6 +5,8 @@ description = "Constants used throughout the Westend network." 
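The `MaxXcmTransactWeight` bounds added for the coretime pallet (10_000_000/10_000 in the test runtime, 200_000_000/20_000 on the relay runtimes) read as (ref_time, proof_size) pairs. A small, purely illustrative check of what the test-runtime value encodes:

use frame_support::{parameter_types, traits::Get, weights::Weight};

parameter_types! {
    pub MaxXcmTransactWeight: Weight = Weight::from_parts(10_000_000, 10_000);
}

let w = MaxXcmTransactWeight::get();
assert_eq!(w.ref_time(), 10_000_000); // execution budget, picoseconds
assert_eq!(w.proof_size(), 10_000); // PoV allowance, bytes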
authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [package.metadata.polkadot-sdk] exclude-from-umbrella = true diff --git a/polkadot/runtime/westend/src/impls.rs b/polkadot/runtime/westend/src/impls.rs index d7281dad56d4..0e0d345a0ed4 100644 --- a/polkadot/runtime/westend/src/impls.rs +++ b/polkadot/runtime/westend/src/impls.rs @@ -21,7 +21,7 @@ use core::marker::PhantomData; use frame_support::pallet_prelude::DispatchResult; use frame_system::RawOrigin; use polkadot_primitives::Balance; -use polkadot_runtime_common::identity_migrator::OnReapIdentity; +use polkadot_runtime_common::identity_migrator::{OnReapIdentity, WeightInfo}; use westend_runtime_constants::currency::*; use xcm::{latest::prelude::*, VersionedLocation, VersionedXcm}; use xcm_executor::traits::TransactAsset; @@ -88,7 +88,10 @@ where AccountId: Into<[u8; 32]> + Clone + Encode, { fn on_reap_identity(who: &AccountId, fields: u32, subs: u32) -> DispatchResult { - use crate::impls::IdentityMigratorCalls::PokeDeposit; + use crate::{ + impls::IdentityMigratorCalls::PokeDeposit, + weights::polkadot_runtime_common_identity_migrator::WeightInfo as MigratorWeights, + }; let total_to_send = Self::calculate_remote_deposit(fields, subs); @@ -141,6 +144,7 @@ where .into(); let poke = PeopleRuntimePallets::::IdentityMigrator(PokeDeposit(who.clone())); + let remote_weight_limit = MigratorWeights::::poke_deposit().saturating_mul(2); // Actual program to execute on People Chain. let program: Xcm<()> = Xcm(vec![ @@ -157,7 +161,11 @@ where .into(), }, // Poke the deposit to reserve the appropriate amount on the parachain. - Transact { origin_kind: OriginKind::Superuser, call: poke.encode().into() }, + Transact { + origin_kind: OriginKind::Superuser, + call: poke.encode().into(), + fallback_max_weight: Some(remote_weight_limit), + }, ]); // send @@ -168,4 +176,9 @@ where )?; Ok(()) } + + #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful_identity_reaping(_: &AccountId, _: u32, _: u32) { + crate::Dmp::make_parachain_reachable(1004); + } } diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 7a5562cc98c1..f9ef74fee29c 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -1004,6 +1004,7 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = MaxSignatories; type WeightInfo = weights::pallet_multisig::WeightInfo; + type BlockNumberProvider = frame_system::Pallet; } parameter_types! { @@ -1204,6 +1205,7 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; type AnnouncementDepositFactor = AnnouncementDepositFactor; + type BlockNumberProvider = frame_system::Pallet; } impl parachains_origin::Config for Runtime {} @@ -1324,6 +1326,7 @@ impl parachains_scheduler::Config for Runtime { parameter_types! { pub const BrokerId: u32 = BROKER_ID; pub const BrokerPalletId: PalletId = PalletId(*b"py/broke"); + pub MaxXcmTransactWeight: Weight = Weight::from_parts(200_000_000, 20_000); } pub struct BrokerPot; @@ -1347,6 +1350,7 @@ impl coretime::Config for Runtime { xcm_config::ThisNetwork, ::AccountId, >; + type MaxXcmTransactWeight = MaxXcmTransactWeight; } parameter_types! { @@ -2635,14 +2639,14 @@ sp_api::impl_runtime_apis! 
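Both relay runtimes satisfy the new `BlockNumberProvider` items on `pallet_multisig` and `pallet_proxy` with `frame_system::Pallet<Runtime>`, the local clock; the associated type exists so that parachains can substitute, say, a relay-chain based provider. A sketch of the trait surface the pallets rely on, assuming `Runtime: frame_system::Config`:

use frame_system::pallet_prelude::BlockNumberFor;
use sp_runtime::traits::BlockNumberProvider;

// The pallets only need a monotonically increasing "now";
// frame_system's implementation returns the runtime's own block number.
fn now<Runtime: frame_system::Config>() -> BlockNumberFor<Runtime> {
    frame_system::Pallet::<Runtime>::current_block_number()
}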
{ ExistentialDepositAsset, xcm_config::PriceForChildParachainDelivery, AssetHubParaId, - (), + Dmp, >, polkadot_runtime_common::xcm_sender::ToParachainDeliveryHelper< xcm_config::XcmConfig, ExistentialDepositAsset, xcm_config::PriceForChildParachainDelivery, RandomParaId, - (), + Dmp, > ); @@ -2708,7 +2712,7 @@ sp_api::impl_runtime_apis! { ExistentialDepositAsset, xcm_config::PriceForChildParachainDelivery, AssetHubParaId, - (), + Dmp, >; fn valid_destination() -> Result { Ok(AssetHub::get()) @@ -2797,8 +2801,9 @@ sp_api::impl_runtime_apis! { } fn alias_origin() -> Result<(Location, Location), BenchmarkError> { - // The XCM executor of Westend doesn't have a configured `Aliasers` - Err(BenchmarkError::Skip) + let origin = Location::new(0, [Parachain(1000)]); + let target = Location::new(0, [Parachain(1000), AccountId32 { id: [128u8; 32], network: None }]); + Ok((origin, target)) } } diff --git a/polkadot/runtime/westend/src/tests.rs b/polkadot/runtime/westend/src/tests.rs index 02fd6b61496b..fcdaf7ff2de6 100644 --- a/polkadot/runtime/westend/src/tests.rs +++ b/polkadot/runtime/westend/src/tests.rs @@ -23,7 +23,7 @@ use approx::assert_relative_eq; use frame_support::traits::WhitelistedStorageKeys; use pallet_staking::EraPayout; use sp_core::{crypto::Ss58Codec, hexdisplay::HexDisplay}; -use sp_keyring::AccountKeyring::Alice; +use sp_keyring::Sr25519Keyring::Alice; use xcm_runtime_apis::conversions::LocationToAccountHelper; const MILLISECONDS_PER_HOUR: u64 = 60 * 60 * 1000; diff --git a/polkadot/runtime/westend/src/weights/pallet_balances.rs b/polkadot/runtime/westend/src/weights/pallet_balances.rs index 5e91f31920ca..deaf8840462b 100644 --- a/polkadot/runtime/westend/src/weights/pallet_balances.rs +++ b/polkadot/runtime/westend/src/weights/pallet_balances.rs @@ -17,25 +17,27 @@ //! Autogenerated weights for `pallet_balances` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-05-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-12-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-unxyhko3-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `95c137a642c3`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: // target/production/polkadot // benchmark // pallet -// --steps=50 -// --repeat=20 // --extrinsic=* +// --chain=westend-dev +// --pallet=pallet_balances +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights // --wasm-execution=compiled +// --steps=50 +// --repeat=20 // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_balances -// --chain=westend-dev -// --header=./polkadot/file_header.txt -// --output=./polkadot/runtime/westend/src/weights/ +// --no-storage-info +// --no-min-squares +// --no-median-slopes #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -54,8 +56,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 43_248_000 picoseconds. - Weight::from_parts(43_872_000, 0) + // Minimum execution time: 51_474_000 picoseconds. 
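The Westend `alias_origin` benchmark above can stop returning `BenchmarkError::Skip` because the runtime now configures an aliaser (see `xcm_config.rs` further down); the pair it returns is precisely what `AliasChildLocation` admits, since the target is interior to the origin. An illustration, assuming the `Location::starts_with` helper that such child-location filters are built on:

use xcm::latest::prelude::*;

let origin = Location::new(0, [Parachain(1000)]);
let target =
    Location::new(0, [Parachain(1000), AccountId32 { id: [128u8; 32], network: None }]);
// A child location extends its parent's junctions, so the alias passes.
assert!(target.starts_with(&origin));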
+ Weight::from_parts(52_840_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -66,8 +68,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 33_990_000 picoseconds. - Weight::from_parts(34_693_000, 0) + // Minimum execution time: 39_875_000 picoseconds. + Weight::from_parts(41_408_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -78,8 +80,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 12_681_000 picoseconds. - Weight::from_parts(13_183_000, 0) + // Minimum execution time: 19_614_000 picoseconds. + Weight::from_parts(20_194_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -90,8 +92,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 17_474_000 picoseconds. - Weight::from_parts(18_063_000, 0) + // Minimum execution time: 27_430_000 picoseconds. + Weight::from_parts(28_151_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -102,8 +104,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `6196` - // Minimum execution time: 45_699_000 picoseconds. - Weight::from_parts(46_099_000, 0) + // Minimum execution time: 54_131_000 picoseconds. + Weight::from_parts(54_810_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -114,8 +116,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 42_453_000 picoseconds. - Weight::from_parts(43_133_000, 0) + // Minimum execution time: 48_692_000 picoseconds. + Weight::from_parts(51_416_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -126,8 +128,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 15_066_000 picoseconds. - Weight::from_parts(15_605_000, 0) + // Minimum execution time: 22_604_000 picoseconds. + Weight::from_parts(23_336_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -139,11 +141,11 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0 + u * (136 ±0)` // Estimated: `990 + u * (2603 ±0)` - // Minimum execution time: 14_180_000 picoseconds. - Weight::from_parts(14_598_000, 0) + // Minimum execution time: 18_118_000 picoseconds. 
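Amid the regenerated numbers it is worth recalling how an entry in this file is consumed: the worst-case weight is the fitted execution time plus a proof-size allowance plus the charged database operations. A sketch using the `transfer_allow_death` constants above, with `db` standing in for the runtime's configured `DbWeight`:

use frame_support::weights::{RuntimeDbWeight, Weight};

fn transfer_allow_death_like(db: RuntimeDbWeight) -> Weight {
    Weight::from_parts(52_840_000, 0) // fitted execution time
        .saturating_add(Weight::from_parts(0, 3593)) // proof size bound
        .saturating_add(db.reads(1))
        .saturating_add(db.writes(1))
}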
+ Weight::from_parts(18_352_000, 0) .saturating_add(Weight::from_parts(0, 990)) - // Standard Error: 13_221 - .saturating_add(Weight::from_parts(13_422_901, 0).saturating_mul(u.into())) + // Standard Error: 14_688 + .saturating_add(Weight::from_parts(15_412_440, 0).saturating_mul(u.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(u.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(u.into()))) .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) @@ -152,24 +154,24 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_130_000 picoseconds. - Weight::from_parts(5_257_000, 0) + // Minimum execution time: 6_779_000 picoseconds. + Weight::from_parts(7_246_000, 0) .saturating_add(Weight::from_parts(0, 0)) } fn burn_allow_death() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 27_328_000 picoseconds. - Weight::from_parts(27_785_000, 0) + // Minimum execution time: 30_935_000 picoseconds. + Weight::from_parts(32_251_000, 0) .saturating_add(Weight::from_parts(0, 0)) } fn burn_keep_alive() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 17_797_000 picoseconds. - Weight::from_parts(18_103_000, 0) + // Minimum execution time: 21_002_000 picoseconds. + Weight::from_parts(21_760_000, 0) .saturating_add(Weight::from_parts(0, 0)) } } diff --git a/polkadot/runtime/westend/src/weights/xcm/mod.rs b/polkadot/runtime/westend/src/weights/xcm/mod.rs index 5be9bad824da..a5fb82a66837 100644 --- a/polkadot/runtime/westend/src/weights/xcm/mod.rs +++ b/polkadot/runtime/westend/src/weights/xcm/mod.rs @@ -27,6 +27,7 @@ use xcm::{ use pallet_xcm_benchmarks_fungible::WeightInfo as XcmBalancesWeight; use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; +use sp_runtime::BoundedVec; use xcm::latest::AssetTransferFilter; /// Types of asset supported by the westend runtime. @@ -114,7 +115,11 @@ impl XcmWeightInfo for WestendXcmWeight { fn transfer_reserve_asset(assets: &Assets, _dest: &Location, _xcm: &Xcm<()>) -> Weight { assets.weigh_assets(XcmBalancesWeight::::transfer_reserve_asset()) } - fn transact(_origin_kind: &OriginKind, _call: &DoubleEncoded) -> Weight { + fn transact( + _origin_kind: &OriginKind, + _fallback_max_weight: &Option, + _call: &DoubleEncoded, + ) -> Weight { XcmGeneric::::transact() } fn hrmp_new_channel_open_request( @@ -204,11 +209,17 @@ impl XcmWeightInfo for WestendXcmWeight { fn clear_error() -> Weight { XcmGeneric::::clear_error() } - - fn set_asset_claimer(_location: &Location) -> Weight { - XcmGeneric::::set_asset_claimer() + fn set_hints(hints: &BoundedVec) -> Weight { + let mut weight = Weight::zero(); + for hint in hints { + match hint { + AssetClaimer { .. 
} => { + weight = weight.saturating_add(XcmGeneric::::asset_claimer()); + }, + } + } + weight } - fn claim_asset(_assets: &Assets, _ticket: &Location) -> Weight { XcmGeneric::::claim_asset() } @@ -288,8 +299,7 @@ impl XcmWeightInfo for WestendXcmWeight { XcmGeneric::::clear_topic() } fn alias_origin(_: &Location) -> Weight { - // XCM Executor does not currently support alias origin operations - Weight::MAX + XcmGeneric::::alias_origin() } fn unpaid_execution(_: &WeightLimit, _: &Option) -> Weight { XcmGeneric::::unpaid_execution() diff --git a/polkadot/runtime/westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/polkadot/runtime/westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 076744a59753..4e10e72356ab 100644 --- a/polkadot/runtime/westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/polkadot/runtime/westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -17,26 +17,28 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-12-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vcatxqpx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `aa8403b52523`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 1024 // Executed Command: // target/production/polkadot // benchmark // pallet -// --steps=50 -// --repeat=20 // --extrinsic=* +// --chain=westend-dev +// --pallet=pallet_xcm_benchmarks::generic +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights/xcm // --wasm-execution=compiled +// --steps=50 +// --repeat=20 // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_xcm_benchmarks::generic -// --chain=westend-dev -// --header=./polkadot/file_header.txt -// --template=./polkadot/xcm/pallet-xcm-benchmarks/template.hbs -// --output=./polkadot/runtime/westend/src/weights/xcm/ +// --template=polkadot/xcm/pallet-xcm-benchmarks/template.hbs +// --no-storage-info +// --no-min-squares +// --no-median-slopes #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -63,8 +65,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `351` // Estimated: `6196` - // Minimum execution time: 69_051_000 picoseconds. - Weight::from_parts(71_282_000, 6196) + // Minimum execution time: 74_868_000 picoseconds. + Weight::from_parts(77_531_000, 6196) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -72,22 +74,22 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 660_000 picoseconds. - Weight::from_parts(695_000, 0) + // Minimum execution time: 688_000 picoseconds. + Weight::from_parts(733_000, 0) } pub(crate) fn pay_fees() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_096_000 picoseconds. - Weight::from_parts(3_313_000, 0) + // Minimum execution time: 3_491_000 picoseconds. + Weight::from_parts(3_667_000, 0) } - pub(crate) fn set_asset_claimer() -> Weight { + pub(crate) fn asset_claimer() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 661_000 picoseconds. 
- Weight::from_parts(707_000, 0) + // Minimum execution time: 757_000 picoseconds. + Weight::from_parts(804_000, 0) } /// Storage: `XcmPallet::Queries` (r:1 w:0) /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -95,65 +97,65 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3465` - // Minimum execution time: 6_054_000 picoseconds. - Weight::from_parts(6_151_000, 3465) + // Minimum execution time: 6_322_000 picoseconds. + Weight::from_parts(6_565_000, 3465) .saturating_add(T::DbWeight::get().reads(1)) } pub(crate) fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_462_000 picoseconds. - Weight::from_parts(7_750_000, 0) + // Minimum execution time: 7_841_000 picoseconds. + Weight::from_parts(8_240_000, 0) } pub(crate) fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_378_000 picoseconds. - Weight::from_parts(1_454_000, 0) + // Minimum execution time: 1_327_000 picoseconds. + Weight::from_parts(1_460_000, 0) } pub(crate) fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 660_000 picoseconds. - Weight::from_parts(744_000, 0) + // Minimum execution time: 680_000 picoseconds. + Weight::from_parts(752_000, 0) } pub(crate) fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 713_000 picoseconds. - Weight::from_parts(755_000, 0) + // Minimum execution time: 712_000 picoseconds. + Weight::from_parts(764_000, 0) } pub(crate) fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 632_000 picoseconds. - Weight::from_parts(703_000, 0) + // Minimum execution time: 663_000 picoseconds. + Weight::from_parts(712_000, 0) } pub(crate) fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 712_000 picoseconds. - Weight::from_parts(771_000, 0) + // Minimum execution time: 756_000 picoseconds. + Weight::from_parts(801_000, 0) } pub(crate) fn execute_with_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 740_000 picoseconds. - Weight::from_parts(826_000, 0) + // Minimum execution time: 773_000 picoseconds. + Weight::from_parts(822_000, 0) } pub(crate) fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 653_000 picoseconds. - Weight::from_parts(707_000, 0) + // Minimum execution time: 669_000 picoseconds. + Weight::from_parts(750_000, 0) } /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -169,8 +171,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `351` // Estimated: `6196` - // Minimum execution time: 66_765_000 picoseconds. - Weight::from_parts(69_016_000, 6196) + // Minimum execution time: 73_173_000 picoseconds. + Weight::from_parts(75_569_000, 6196) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -180,8 +182,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `23` // Estimated: `3488` - // Minimum execution time: 9_545_000 picoseconds. 
- Weight::from_parts(9_853_000, 3488) + // Minimum execution time: 9_851_000 picoseconds. + Weight::from_parts(10_087_000, 3488) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -189,8 +191,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 676_000 picoseconds. - Weight::from_parts(723_000, 0) + // Minimum execution time: 673_000 picoseconds. + Weight::from_parts(744_000, 0) } /// Storage: `XcmPallet::VersionNotifyTargets` (r:1 w:1) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -206,8 +208,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `147` // Estimated: `3612` - // Minimum execution time: 31_324_000 picoseconds. - Weight::from_parts(32_023_000, 3612) + // Minimum execution time: 35_714_000 picoseconds. + Weight::from_parts(36_987_000, 3612) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -217,44 +219,44 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_058_000 picoseconds. - Weight::from_parts(3_199_000, 0) + // Minimum execution time: 3_128_000 picoseconds. + Weight::from_parts(3_364_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub(crate) fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 994_000 picoseconds. - Weight::from_parts(1_115_000, 0) + // Minimum execution time: 1_070_000 picoseconds. + Weight::from_parts(1_188_000, 0) } pub(crate) fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 763_000 picoseconds. - Weight::from_parts(824_000, 0) + // Minimum execution time: 764_000 picoseconds. + Weight::from_parts(863_000, 0) } pub(crate) fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 665_000 picoseconds. - Weight::from_parts(712_000, 0) + // Minimum execution time: 675_000 picoseconds. + Weight::from_parts(755_000, 0) } pub(crate) fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 627_000 picoseconds. - Weight::from_parts(695_000, 0) + // Minimum execution time: 666_000 picoseconds. + Weight::from_parts(745_000, 0) } pub(crate) fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 839_000 picoseconds. - Weight::from_parts(889_000, 0) + // Minimum execution time: 838_000 picoseconds. + Weight::from_parts(918_000, 0) } /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -270,8 +272,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `351` // Estimated: `6196` - // Minimum execution time: 75_853_000 picoseconds. - Weight::from_parts(77_515_000, 6196) + // Minimum execution time: 82_721_000 picoseconds. + Weight::from_parts(85_411_000, 6196) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -279,8 +281,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_183_000 picoseconds. - Weight::from_parts(8_378_000, 0) + // Minimum execution time: 8_138_000 picoseconds. 
+ Weight::from_parts(8_344_000, 0) } /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -296,8 +298,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `351` // Estimated: `6196` - // Minimum execution time: 66_576_000 picoseconds. - Weight::from_parts(69_465_000, 6196) + // Minimum execution time: 73_617_000 picoseconds. + Weight::from_parts(76_999_000, 6196) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -305,35 +307,42 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 739_000 picoseconds. - Weight::from_parts(773_000, 0) + // Minimum execution time: 714_000 picoseconds. + Weight::from_parts(806_000, 0) } pub(crate) fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 648_000 picoseconds. - Weight::from_parts(693_000, 0) + // Minimum execution time: 676_000 picoseconds. + Weight::from_parts(720_000, 0) } pub(crate) fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 654_000 picoseconds. - Weight::from_parts(700_000, 0) + // Minimum execution time: 666_000 picoseconds. + Weight::from_parts(731_000, 0) } pub(crate) fn set_fees_mode() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 646_000 picoseconds. - Weight::from_parts(702_000, 0) + // Minimum execution time: 662_000 picoseconds. + Weight::from_parts(696_000, 0) } pub(crate) fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 665_000 picoseconds. - Weight::from_parts(714_000, 0) + // Minimum execution time: 693_000 picoseconds. + Weight::from_parts(760_000, 0) + } + pub(crate) fn alias_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 705_000 picoseconds. 
+ Weight::from_parts(746_000, 0) } } diff --git a/polkadot/runtime/westend/src/xcm_config.rs b/polkadot/runtime/westend/src/xcm_config.rs index f8bb2676de3f..3f6a7304c8a9 100644 --- a/polkadot/runtime/westend/src/xcm_config.rs +++ b/polkadot/runtime/westend/src/xcm_config.rs @@ -38,13 +38,14 @@ use westend_runtime_constants::{ }; use xcm::latest::{prelude::*, WESTEND_GENESIS_HASH}; use xcm_builder::{ - AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, - AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, ChildParachainAsNative, - ChildParachainConvertsVia, DescribeAllTerminal, DescribeFamily, FrameTransactionalProcessor, - FungibleAdapter, HashedDescription, IsChildSystemParachain, IsConcrete, MintLocation, - OriginToPluralityVoice, SendXcmFeeToAccount, SignedAccountId32AsNative, SignedToAccountId32, - SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, - WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, + AccountId32Aliases, AliasChildLocation, AllowExplicitUnpaidExecutionFrom, + AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, + ChildParachainAsNative, ChildParachainConvertsVia, DescribeAllTerminal, DescribeFamily, + FrameTransactionalProcessor, FungibleAdapter, HashedDescription, IsChildSystemParachain, + IsConcrete, MintLocation, OriginToPluralityVoice, SendXcmFeeToAccount, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, + TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + XcmFeeManagerFromComponents, }; use xcm_executor::XcmExecutor; @@ -183,6 +184,11 @@ pub type Barrier = TrailingSetTopicAsId<( /// We only waive fees for system functions, which these locations represent. pub type WaivedLocations = (SystemParachains, Equals, LocalPlurality); +/// We let locations alias into child locations of their own. +/// This is a very simple aliasing rule, mimicking the behaviour of +/// the `DescendOrigin` instruction. +pub type Aliasers = AliasChildLocation; + pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { type RuntimeCall = RuntimeCall; @@ -216,7 +222,7 @@ impl xcm_executor::Config for XcmConfig { type UniversalAliases = Nothing; type CallDispatcher = RuntimeCall; type SafeCallFilter = Everything; - type Aliasers = Nothing; + type Aliasers = Aliasers; type TransactionalProcessor = FrameTransactionalProcessor; type HrmpNewChannelOpenRequestHandler = (); type HrmpChannelAcceptedHandler = (); diff --git a/polkadot/statement-table/Cargo.toml b/polkadot/statement-table/Cargo.toml index 53ea0b74463b..d9519dafe12d 100644 --- a/polkadot/statement-table/Cargo.toml +++ b/polkadot/statement-table/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Stores messages other authorities issue about candidates in Polkadot." 
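The new `Aliasers = AliasChildLocation` wiring above makes `AliasOrigin` usable on Westend for the first time, with semantics the doc comment compares to `DescendOrigin`. A sketch of the equivalence, assuming execution under the origin `Parachain(1000)`:

use xcm::latest::prelude::*;

// Both programs leave the executor with the same, narrower origin:
// (0, [Parachain(1000), AccountId32 { .. }]).
let via_descend: Xcm<()> = Xcm(vec![DescendOrigin(
    [AccountId32 { id: [0u8; 32], network: None }].into(),
)]);
let via_alias: Xcm<()> = Xcm(vec![AliasOrigin(Location::new(
    0,
    [Parachain(1000), AccountId32 { id: [0u8; 32], network: None }],
))]);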
+homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/utils/generate-bags/Cargo.toml b/polkadot/utils/generate-bags/Cargo.toml index 16205b0f51f5..3006d8325ef9 100644 --- a/polkadot/utils/generate-bags/Cargo.toml +++ b/polkadot/utils/generate-bags/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "CLI to generate voter bags for Polkadot runtimes" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/xcm/Cargo.toml b/polkadot/xcm/Cargo.toml index 86c7067ad6fa..7ac12dc1e377 100644 --- a/polkadot/xcm/Cargo.toml +++ b/polkadot/xcm/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -49,3 +51,7 @@ json-schema = [ "dep:schemars", "sp-weights/json-schema", ] +runtime-benchmarks = [ + "frame-support/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] diff --git a/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml b/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml index b07bdfdca3d1..d4131cc53ee6 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml +++ b/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml @@ -5,6 +5,8 @@ edition.workspace = true license.workspace = true version = "7.0.0" description = "Benchmarks for the XCM pallet" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -62,4 +64,5 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm/runtime-benchmarks", ] diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/benchmarking.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/benchmarking.rs index 303ff9493f71..4428076aa077 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/benchmarking.rs @@ -231,6 +231,13 @@ benchmarks_instance_pallet! { let dest_account = T::AccountIdConverter::convert_location(&dest_location).unwrap(); assert!(T::TransactAsset::balance(&dest_account).is_zero()); + // Ensure that origin can send to destination (e.g. setup delivery fees, ensure router setup, ...) + let (_, _) = T::DeliveryHelper::ensure_successful_delivery( + &Default::default(), + &dest_location, + FeeReason::ChargeFees, + ); + let mut executor = new_executor::(Default::default()); executor.set_holding(holding.into()); let instruction = Instruction::>::DepositAsset { @@ -257,6 +264,13 @@ benchmarks_instance_pallet! { let dest_account = T::AccountIdConverter::convert_location(&dest_location).unwrap(); assert!(T::TransactAsset::balance(&dest_account).is_zero()); + // Ensure that origin can send to destination (e.g. setup delivery fees, ensure router setup, ...) + let (_, _) = T::DeliveryHelper::ensure_successful_delivery( + &Default::default(), + &dest_location, + FeeReason::ChargeFees, + ); + let mut executor = new_executor::(Default::default()); executor.set_holding(holding.into()); let instruction = Instruction::>::DepositReserveAsset { @@ -281,12 +295,20 @@ benchmarks_instance_pallet! { // Checked account starts at zero assert!(T::CheckedAccount::get().map_or(true, |(c, _)| T::TransactAsset::balance(&c).is_zero())); + let dest_location = T::valid_destination()?; + + // Ensure that origin can send to destination (e.g. setup delivery fees, ensure router setup, ...) 
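Each fungible benchmark that ends up sending a message now performs the delivery setup first, as seen above. The recurring pattern, generic over the pallet's `T` and offered as a sketch rather than the exact helper contract:

use xcm_executor::traits::FeeReason;

// Ask the configured helper to make delivery from sender to dest
// succeed (fund delivery fees, mark the destination reachable), then
// honour whatever fees mode and holding it prescribes.
let (fees_mode, assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery(
    &sender_location,
    &dest_location,
    FeeReason::ChargeFees,
);
if let Some(mode) = fees_mode {
    executor.set_fees_mode(mode);
}
if let Some(assets) = assets_in_holding {
    executor.set_holding(assets.into());
}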
+ let (_, _) = T::DeliveryHelper::ensure_successful_delivery( + &Default::default(), + &dest_location, + FeeReason::ChargeFees, + ); let mut executor = new_executor::(Default::default()); executor.set_holding(holding.into()); let instruction = Instruction::>::InitiateTeleport { assets: asset.into(), - dest: T::valid_destination()?, + dest: dest_location, xcm: Xcm::new(), }; let xcm = Xcm(vec![instruction]); @@ -303,6 +325,15 @@ benchmarks_instance_pallet! { let (sender_account, sender_location) = account_and_location::(1); let asset = T::get_asset(); let mut holding = T::worst_case_holding(1); + let dest_location = T::valid_destination()?; + + // Ensure that origin can send to destination (e.g. setup delivery fees, ensure router setup, ...) + let (_, _) = T::DeliveryHelper::ensure_successful_delivery( + &sender_location, + &dest_location, + FeeReason::ChargeFees, + ); + let sender_account_balance_before = T::TransactAsset::balance(&sender_account); // Add our asset to the holding. @@ -311,7 +342,7 @@ benchmarks_instance_pallet! { let mut executor = new_executor::(sender_location); executor.set_holding(holding.into()); let instruction = Instruction::>::InitiateTransfer { - destination: T::valid_destination()?, + destination: dest_location, // ReserveDeposit is the most expensive filter. remote_fees: Some(AssetTransferFilter::ReserveDeposit(asset.clone().into())), // It's more expensive if we reanchor the origin. diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs index 87bf27e4ff18..1c62bb5886d8 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs @@ -13,13 +13,14 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . +#![cfg(feature = "runtime-benchmarks")] use super::*; use crate::{account_and_location, new_executor, EnsureDelivery, XcmCallOf}; use alloc::{vec, vec::Vec}; use codec::Encode; -use frame_benchmarking::{benchmarks, BenchmarkError}; -use frame_support::traits::fungible::Inspect; +use frame_benchmarking::v2::*; +use frame_support::{traits::fungible::Inspect, BoundedVec}; use xcm::{ latest::{prelude::*, MaxDispatchErrorLen, MaybeErrorCode, Weight, MAX_ITEMS_IN_ASSETS}, DoubleEncoded, @@ -29,16 +30,21 @@ use xcm_executor::{ ExecutorError, FeesMode, }; -benchmarks! { - report_holding { +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn report_holding() -> Result<(), BenchmarkError> { let (sender_account, sender_location) = account_and_location::(1); let destination = T::valid_destination().map_err(|_| BenchmarkError::Skip)?; - let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( - &sender_location, - &destination, - FeeReason::Report, - ); + let (expected_fees_mode, expected_assets_in_holding) = + T::DeliveryHelper::ensure_successful_delivery( + &sender_location, + &destination, + FeeReason::Report, + ); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); // generate holding and add possible required fees @@ -64,21 +70,33 @@ benchmarks! { query_id: Default::default(), max_weight: Weight::MAX, }, - // Worst case is looking through all holdings for every asset explicitly - respecting the limit `MAX_ITEMS_IN_ASSETS`. 
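The remainder of this file is a mechanical port from the old `benchmarks!` macro to benchmarking v2: setup code moves above `#[block]`, the old `: { ... }` body becomes the `#[block]`, and the `verify` section becomes plain assertions before `Ok(())`. A minimal sketch of the mapping, using a hypothetical benchmark in the style of `clear_origin` below:

#[benchmarks]
mod example {
    use super::*;

    // v1: `foo { /* setup */ }: { /* measured */ } verify { /* checks */ }`
    #[benchmark]
    fn foo() -> Result<(), BenchmarkError> {
        let mut executor = new_executor::<T>(Default::default()); // setup
        let xcm = Xcm(vec![Instruction::ClearOrigin]);
        #[block]
        {
            executor.bench_process(xcm)?; // only this is measured
        }
        assert_eq!(executor.origin(), &None); // old `verify` body
        Ok(())
    }
}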
- assets: Definite(holding.into_inner().into_iter().take(MAX_ITEMS_IN_ASSETS).collect::>().into()), + // Worst case is looking through all holdings for every asset explicitly - respecting + // the limit `MAX_ITEMS_IN_ASSETS`. + assets: Definite( + holding + .into_inner() + .into_iter() + .take(MAX_ITEMS_IN_ASSETS) + .collect::>() + .into(), + ), }; let xcm = Xcm(vec![instruction]); - } : { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } // Check we charged the delivery fees assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); + + Ok(()) } // This benchmark does not use any additional orders or instructions. This should be managed // by the `deep` and `shallow` implementation. - buy_execution { + #[benchmark] + fn buy_execution() -> Result<(), BenchmarkError> { let holding = T::worst_case_holding(0).into(); let mut executor = new_executor::(Default::default()); @@ -92,13 +110,16 @@ benchmarks! { }; let xcm = Xcm(vec![instruction]); - } : { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } + Ok(()) } - pay_fees { + #[benchmark] + fn pay_fees() -> Result<(), BenchmarkError> { let holding = T::worst_case_holding(0).into(); let mut executor = new_executor::(Default::default()); @@ -111,40 +132,57 @@ benchmarks! { let instruction = Instruction::>::PayFees { asset: fee_asset }; let xcm = Xcm(vec![instruction]); - } : { - executor.bench_process(xcm)?; - } verify {} + #[block] + { + executor.bench_process(xcm)?; + } + Ok(()) + } - set_asset_claimer { + #[benchmark] + fn asset_claimer() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let (_, sender_location) = account_and_location::(1); - let instruction = Instruction::SetAssetClaimer{ location:sender_location.clone() }; + let instruction = Instruction::SetHints { + hints: BoundedVec::::truncate_from(vec![AssetClaimer { + location: sender_location.clone(), + }]), + }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.asset_claimer(), Some(sender_location.clone())); + + Ok(()) } - query_response { + #[benchmark] + fn query_response() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let (query_id, response) = T::worst_case_response(); let max_weight = Weight::MAX; let querier: Option = Some(Here.into()); let instruction = Instruction::QueryResponse { query_id, response, max_weight, querier }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + + #[block] + { + executor.bench_process(xcm)?; + } // The assert above is enough to show this XCM succeeded + + Ok(()) } // We don't care about the call itself, since that is accounted for in the weight parameter // and included in the final weight calculation. So this is just the overhead of submitting // a noop call. - transact { + #[benchmark] + fn transact() -> Result<(), BenchmarkError> { let (origin, noop_call) = T::transact_origin_and_runtime_call()?; let mut executor = new_executor::(origin); let double_encoded_noop_call: DoubleEncoded<_> = noop_call.encode().into(); @@ -152,121 +190,148 @@ benchmarks! 
{ let instruction = Instruction::Transact { origin_kind: OriginKind::SovereignAccount, call: double_encoded_noop_call, + fallback_max_weight: None, }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } // TODO Make the assertion configurable? + + Ok(()) } - refund_surplus { + #[benchmark] + fn refund_surplus() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let holding_assets = T::worst_case_holding(1); // We can already buy execution since we'll load the holding register manually let asset_for_fees = T::fee_asset().unwrap(); - let previous_xcm = Xcm(vec![BuyExecution { fees: asset_for_fees, weight_limit: Limited(Weight::from_parts(1337, 1337)) }]); + let previous_xcm = Xcm(vec![BuyExecution { + fees: asset_for_fees, + weight_limit: Limited(Weight::from_parts(1337, 1337)), + }]); executor.set_holding(holding_assets.into()); executor.set_total_surplus(Weight::from_parts(1337, 1337)); executor.set_total_refunded(Weight::zero()); - executor.bench_process(previous_xcm).expect("Holding has been loaded, so we can buy execution here"); + executor + .bench_process(previous_xcm) + .expect("Holding has been loaded, so we can buy execution here"); let instruction = Instruction::>::RefundSurplus; let xcm = Xcm(vec![instruction]); - } : { - let result = executor.bench_process(xcm)?; - } verify { + #[block] + { + let _result = executor.bench_process(xcm)?; + } assert_eq!(executor.total_surplus(), &Weight::from_parts(1337, 1337)); assert_eq!(executor.total_refunded(), &Weight::from_parts(1337, 1337)); + + Ok(()) } - set_error_handler { + #[benchmark] + fn set_error_handler() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let instruction = Instruction::>::SetErrorHandler(Xcm(vec![])); let xcm = Xcm(vec![instruction]); - } : { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.error_handler(), &Xcm(vec![])); + + Ok(()) } - set_appendix { + #[benchmark] + fn set_appendix() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let appendix = Xcm(vec![]); let instruction = Instruction::>::SetAppendix(appendix); let xcm = Xcm(vec![instruction]); - } : { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.appendix(), &Xcm(vec![])); + Ok(()) } - clear_error { + #[benchmark] + fn clear_error() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); executor.set_error(Some((5u32, XcmError::Overflow))); let instruction = Instruction::>::ClearError; let xcm = Xcm(vec![instruction]); - } : { - executor.bench_process(xcm)?; - } verify { - assert!(executor.error().is_none()) + #[block] + { + executor.bench_process(xcm)?; + } + assert!(executor.error().is_none()); + Ok(()) } - descend_origin { + #[benchmark] + fn descend_origin() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let who = Junctions::from([OnlyChild, OnlyChild]); let instruction = Instruction::DescendOrigin(who.clone()); let xcm = Xcm(vec![instruction]); - } : { - executor.bench_process(xcm)?; - } verify { - assert_eq!( - executor.origin(), - &Some(Location { - parents: 0, - interior: who, - }), - ); + #[block] + { + executor.bench_process(xcm)?; + } + assert_eq!(executor.origin(), &Some(Location { parents: 0, interior: who }),); + + Ok(()) } - execute_with_origin { 
+ #[benchmark] + fn execute_with_origin() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let who: Junctions = Junctions::from([AccountId32 { id: [0u8; 32], network: None }]); - let instruction = Instruction::ExecuteWithOrigin { descendant_origin: Some(who.clone()), xcm: Xcm(vec![]) }; + let instruction = Instruction::ExecuteWithOrigin { + descendant_origin: Some(who.clone()), + xcm: Xcm(vec![]), + }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { - assert_eq!( - executor.origin(), - &Some(Location { - parents: 0, - interior: Here, - }), - ); + #[block] + { + executor.bench_process(xcm)?; + } + assert_eq!(executor.origin(), &Some(Location { parents: 0, interior: Here }),); + + Ok(()) } - clear_origin { + #[benchmark] + fn clear_origin() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let instruction = Instruction::ClearOrigin; let xcm = Xcm(vec![instruction]); - } : { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.origin(), &None); + Ok(()) } - report_error { + #[benchmark] + fn report_error() -> Result<(), BenchmarkError> { let (sender_account, sender_location) = account_and_location::(1); let query_id = Default::default(); let max_weight = Default::default(); let destination = T::valid_destination().map_err(|_| BenchmarkError::Skip)?; - let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( - &sender_location, - &destination, - FeeReason::Report, - ); + let (expected_fees_mode, expected_assets_in_holding) = + T::DeliveryHelper::ensure_successful_delivery( + &sender_location, + &destination, + FeeReason::Report, + ); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); let mut executor = new_executor::(sender_location); @@ -278,18 +343,21 @@ benchmarks! { } executor.set_error(Some((0u32, XcmError::Unimplemented))); - let instruction = Instruction::ReportError(QueryResponseInfo { - query_id, destination, max_weight - }); + let instruction = + Instruction::ReportError(QueryResponseInfo { query_id, destination, max_weight }); let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } // Check we charged the delivery fees assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); + + Ok(()) } - claim_asset { + #[benchmark] + fn claim_asset() -> Result<(), BenchmarkError> { use xcm_executor::traits::DropAssets; let (origin, ticket, assets) = T::claimable_asset()?; @@ -298,11 +366,7 @@ benchmarks! { ::AssetTrap::drop_assets( &origin, assets.clone().into(), - &XcmContext { - origin: Some(origin.clone()), - message_id: [0; 32], - topic: None, - }, + &XcmContext { origin: Some(origin.clone()), message_id: [0; 32], topic: None }, ); // Assets should be in the trap now. @@ -310,28 +374,32 @@ benchmarks! 
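The `execute_with_origin` port above is also a reminder of the instruction's semantics: the inner program runs under the descendant origin and the previous origin is restored afterwards, which is why the assertion still observes `Here`. A sketch mirroring the benchmark's types:

use xcm::latest::prelude::*;

let who: Junctions = Junctions::from([AccountId32 { id: [0u8; 32], network: None }]);
let message: Xcm<()> = Xcm(vec![ExecuteWithOrigin {
    descendant_origin: Some(who),
    // The inner program sees the descended origin; the outer origin
    // is reinstated once it completes.
    xcm: Xcm(vec![ClearOrigin]),
}]);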
{ let mut executor = new_executor::(origin); let instruction = Instruction::ClaimAsset { assets: assets.clone(), ticket }; let xcm = Xcm(vec![instruction]); - } :{ - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert!(executor.holding().ensure_contains(&assets).is_ok()); + Ok(()) } - trap { + #[benchmark] + fn trap() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let instruction = Instruction::Trap(10); let xcm = Xcm(vec![instruction]); // In order to access result in the verification below, it needs to be defined here. - let mut _result = Ok(()); - } : { - _result = executor.bench_process(xcm); - } verify { - assert!(matches!(_result, Err(ExecutorError { - xcm_error: XcmError::Trap(10), - .. - }))); + let result; + #[block] + { + result = executor.bench_process(xcm); + } + assert!(matches!(result, Err(ExecutorError { xcm_error: XcmError::Trap(10), .. }))); + + Ok(()) } - subscribe_version { + #[benchmark] + fn subscribe_version() -> Result<(), BenchmarkError> { use xcm_executor::traits::VersionChangeNotifier; let origin = T::subscribe_origin()?; let query_id = Default::default(); @@ -339,40 +407,55 @@ benchmarks! { let mut executor = new_executor::(origin.clone()); let instruction = Instruction::SubscribeVersion { query_id, max_response_weight }; let xcm = Xcm(vec![instruction]); - } : { - executor.bench_process(xcm)?; - } verify { - assert!(::SubscriptionService::is_subscribed(&origin)); + + T::DeliveryHelper::ensure_successful_delivery(&origin, &origin, FeeReason::QueryPallet); + + #[block] + { + executor.bench_process(xcm)?; + } + assert!(::SubscriptionService::is_subscribed( + &origin + )); + Ok(()) } - unsubscribe_version { + #[benchmark] + fn unsubscribe_version() -> Result<(), BenchmarkError> { use xcm_executor::traits::VersionChangeNotifier; // First we need to subscribe to notifications. let (origin, _) = T::transact_origin_and_runtime_call()?; + + T::DeliveryHelper::ensure_successful_delivery(&origin, &origin, FeeReason::QueryPallet); + let query_id = Default::default(); let max_response_weight = Default::default(); ::SubscriptionService::start( &origin, query_id, max_response_weight, - &XcmContext { - origin: Some(origin.clone()), - message_id: [0; 32], - topic: None, - }, - ).map_err(|_| "Could not start subscription")?; - assert!(::SubscriptionService::is_subscribed(&origin)); + &XcmContext { origin: Some(origin.clone()), message_id: [0; 32], topic: None }, + ) + .map_err(|_| "Could not start subscription")?; + assert!(::SubscriptionService::is_subscribed( + &origin + )); let mut executor = new_executor::(origin.clone()); let instruction = Instruction::UnsubscribeVersion; let xcm = Xcm(vec![instruction]); - } : { - executor.bench_process(xcm)?; - } verify { - assert!(!::SubscriptionService::is_subscribed(&origin)); + #[block] + { + executor.bench_process(xcm)?; + } + assert!(!::SubscriptionService::is_subscribed( + &origin + )); + Ok(()) } - burn_asset { + #[benchmark] + fn burn_asset() -> Result<(), BenchmarkError> { let holding = T::worst_case_holding(0); let assets = holding.clone(); @@ -381,13 +464,16 @@ benchmarks! 
{ let instruction = Instruction::BurnAsset(assets.into()); let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert!(executor.holding().is_empty()); + Ok(()) } - expect_asset { + #[benchmark] + fn expect_asset() -> Result<(), BenchmarkError> { let holding = T::worst_case_holding(0); let assets = holding.clone(); @@ -396,71 +482,86 @@ benchmarks! { let instruction = Instruction::ExpectAsset(assets.into()); let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } // `execute` completing successfully is as good as we can check. + + Ok(()) } - expect_origin { + #[benchmark] + fn expect_origin() -> Result<(), BenchmarkError> { let expected_origin = Parent.into(); let mut executor = new_executor::(Default::default()); let instruction = Instruction::ExpectOrigin(Some(expected_origin)); let xcm = Xcm(vec![instruction]); let mut _result = Ok(()); - }: { - _result = executor.bench_process(xcm); - } verify { - assert!(matches!(_result, Err(ExecutorError { - xcm_error: XcmError::ExpectationFalse, - .. - }))); + #[block] + { + _result = executor.bench_process(xcm); + } + assert!(matches!( + _result, + Err(ExecutorError { xcm_error: XcmError::ExpectationFalse, .. }) + )); + + Ok(()) } - expect_error { + #[benchmark] + fn expect_error() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); executor.set_error(Some((3u32, XcmError::Overflow))); let instruction = Instruction::ExpectError(None); let xcm = Xcm(vec![instruction]); let mut _result = Ok(()); - }: { - _result = executor.bench_process(xcm); - } verify { - assert!(matches!(_result, Err(ExecutorError { - xcm_error: XcmError::ExpectationFalse, - .. - }))); + #[block] + { + _result = executor.bench_process(xcm); + } + assert!(matches!( + _result, + Err(ExecutorError { xcm_error: XcmError::ExpectationFalse, .. 
}) + )); + + Ok(()) } - expect_transact_status { + #[benchmark] + fn expect_transact_status() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); - let worst_error = || -> MaybeErrorCode { - vec![0; MaxDispatchErrorLen::get() as usize].into() - }; + let worst_error = + || -> MaybeErrorCode { vec![0; MaxDispatchErrorLen::get() as usize].into() }; executor.set_transact_status(worst_error()); let instruction = Instruction::ExpectTransactStatus(worst_error()); let xcm = Xcm(vec![instruction]); let mut _result = Ok(()); - }: { - _result = executor.bench_process(xcm); - } verify { + #[block] + { + _result = executor.bench_process(xcm); + } assert!(matches!(_result, Ok(..))); + Ok(()) } - query_pallet { + #[benchmark] + fn query_pallet() -> Result<(), BenchmarkError> { let (sender_account, sender_location) = account_and_location::(1); let query_id = Default::default(); let destination = T::valid_destination().map_err(|_| BenchmarkError::Skip)?; let max_weight = Default::default(); - let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( - &sender_location, - &destination, - FeeReason::QueryPallet, - ); + let (expected_fees_mode, expected_assets_in_holding) = + T::DeliveryHelper::ensure_successful_delivery( + &sender_location, + &destination, + FeeReason::QueryPallet, + ); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); let mut executor = new_executor::(sender_location); if let Some(expected_fees_mode) = expected_fees_mode { @@ -476,15 +577,19 @@ benchmarks! { response_info: QueryResponseInfo { destination, query_id, max_weight }, }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } // Check we charged the delivery fees assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); // TODO: Potentially add new trait to XcmSender to detect a queued outgoing message. #4426 + + Ok(()) } - expect_pallet { + #[benchmark] + fn expect_pallet() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let valid_pallet = T::valid_pallet(); let instruction = Instruction::ExpectPallet { @@ -495,23 +600,27 @@ benchmarks! { min_crate_minor: valid_pallet.crate_version.minor.into(), }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } // the execution succeeding is all we need to verify this xcm was successful + Ok(()) } - report_transact_status { + #[benchmark] + fn report_transact_status() -> Result<(), BenchmarkError> { let (sender_account, sender_location) = account_and_location::(1); let query_id = Default::default(); let destination = T::valid_destination().map_err(|_| BenchmarkError::Skip)?; let max_weight = Default::default(); - let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( - &sender_location, - &destination, - FeeReason::Report, - ); + let (expected_fees_mode, expected_assets_in_holding) = + T::DeliveryHelper::ensure_successful_delivery( + &sender_location, + &destination, + FeeReason::Report, + ); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); let mut executor = new_executor::(sender_location); @@ -529,84 +638,102 @@ benchmarks! 
{ max_weight, }); let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } // Check we charged the delivery fees assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); // TODO: Potentially add new trait to XcmSender to detect a queued outgoing message. #4426 + Ok(()) } - clear_transact_status { + #[benchmark] + fn clear_transact_status() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); executor.set_transact_status(b"MyError".to_vec().into()); let instruction = Instruction::ClearTransactStatus; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.transact_status(), &MaybeErrorCode::Success); + Ok(()) } - set_topic { + #[benchmark] + fn set_topic() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let instruction = Instruction::SetTopic([1; 32]); let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.topic(), &Some([1; 32])); + Ok(()) } - clear_topic { + #[benchmark] + fn clear_topic() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); executor.set_topic(Some([2; 32])); let instruction = Instruction::ClearTopic; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.topic(), &None); + Ok(()) } - exchange_asset { + #[benchmark] + fn exchange_asset() -> Result<(), BenchmarkError> { let (give, want) = T::worst_case_asset_exchange().map_err(|_| BenchmarkError::Skip)?; let assets = give.clone(); let mut executor = new_executor::(Default::default()); executor.set_holding(give.into()); - let instruction = Instruction::ExchangeAsset { - give: assets.into(), - want: want.clone(), - maximal: true, - }; + let instruction = + Instruction::ExchangeAsset { give: assets.into(), want: want.clone(), maximal: true }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.holding(), &want.into()); + Ok(()) } - universal_origin { + #[benchmark] + fn universal_origin() -> Result<(), BenchmarkError> { let (origin, alias) = T::universal_alias().map_err(|_| BenchmarkError::Skip)?; let mut executor = new_executor::(origin); let instruction = Instruction::UniversalOrigin(alias); let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } use frame_support::traits::Get; let universal_location = ::UniversalLocation::get(); - assert_eq!(executor.origin(), &Some(Junctions::from([alias]).relative_to(&universal_location))); + assert_eq!( + executor.origin(), + &Some(Junctions::from([alias]).relative_to(&universal_location)) + ); + + Ok(()) } - export_message { - let x in 1 .. 1000; + #[benchmark] + fn export_message(x: Linear<1, 1000>) -> Result<(), BenchmarkError> { // The `inner_xcm` influences `ExportMessage` total weight based on // `inner_xcm.encoded_size()`, so for this benchmark use smallest encoded instruction // to approximate weight per "unit" of encoded size; then actual weight can be estimated @@ -616,11 +743,12 @@ benchmarks! { // Get `origin`, `network` and `destination` from configured runtime. 
let (origin, network, destination) = T::export_message_origin_and_destination()?; - let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( - &origin, - &destination.clone().into(), - FeeReason::Export { network, destination: destination.clone() }, - ); + let (expected_fees_mode, expected_assets_in_holding) = + T::DeliveryHelper::ensure_successful_delivery( + &origin, + &destination.clone().into(), + FeeReason::Export { network, destination: destination.clone() }, + ); let sender_account = T::AccountIdConverter::convert_location(&origin).unwrap(); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); @@ -631,37 +759,39 @@ benchmarks! { if let Some(expected_assets_in_holding) = expected_assets_in_holding { executor.set_holding(expected_assets_in_holding.into()); } - let xcm = Xcm(vec![ExportMessage { - network, destination: destination.clone(), xcm: inner_xcm, - }]); - }: { - executor.bench_process(xcm)?; - } verify { + let xcm = + Xcm(vec![ExportMessage { network, destination: destination.clone(), xcm: inner_xcm }]); + #[block] + { + executor.bench_process(xcm)?; + } // Check we charged the delivery fees assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); // TODO: Potentially add new trait to XcmSender to detect a queued outgoing message. #4426 + Ok(()) } - set_fees_mode { + #[benchmark] + fn set_fees_mode() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); executor.set_fees_mode(FeesMode { jit_withdraw: false }); let instruction = Instruction::SetFeesMode { jit_withdraw: true }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.fees_mode(), &FeesMode { jit_withdraw: true }); + Ok(()) } - lock_asset { + #[benchmark] + fn lock_asset() -> Result<(), BenchmarkError> { let (unlocker, owner, asset) = T::unlockable_asset()?; - let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( - &owner, - &unlocker, - FeeReason::LockAsset, - ); + let (expected_fees_mode, expected_assets_in_holding) = + T::DeliveryHelper::ensure_successful_delivery(&owner, &unlocker, FeeReason::LockAsset); let sender_account = T::AccountIdConverter::convert_location(&owner).unwrap(); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); @@ -681,15 +811,18 @@ benchmarks! { let instruction = Instruction::LockAsset { asset, unlocker }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } // Check delivery fees assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); // TODO: Potentially add new trait to XcmSender to detect a queued outgoing message. #4426 + Ok(()) } - unlock_asset { + #[benchmark] + fn unlock_asset() -> Result<(), BenchmarkError> { use xcm_executor::traits::{AssetLock, Enact}; let (unlocker, owner, asset) = T::unlockable_asset()?; @@ -709,13 +842,15 @@ benchmarks! { // ... then unlock them with the UnlockAsset instruction. 
let instruction = Instruction::UnlockAsset { asset, target: owner }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { - + #[block] + { + executor.bench_process(xcm)?; + } + Ok(()) } - note_unlockable { + #[benchmark] + fn note_unlockable() -> Result<(), BenchmarkError> { use xcm_executor::traits::{AssetLock, Enact}; let (unlocker, owner, asset) = T::unlockable_asset()?; @@ -735,13 +870,15 @@ benchmarks! { // ... then note them as unlockable with the NoteUnlockable instruction. let instruction = Instruction::NoteUnlockable { asset, owner }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { - + #[block] + { + executor.bench_process(xcm)?; + } + Ok(()) } - request_unlock { + #[benchmark] + fn request_unlock() -> Result<(), BenchmarkError> { use xcm_executor::traits::{AssetLock, Enact}; let (locker, owner, asset) = T::unlockable_asset()?; @@ -756,11 +893,12 @@ benchmarks! { .enact() .map_err(|_| BenchmarkError::Skip)?; - let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( - &owner, - &locker, - FeeReason::RequestUnlock, - ); + let (expected_fees_mode, expected_assets_in_holding) = + T::DeliveryHelper::ensure_successful_delivery( + &owner, + &locker, + FeeReason::RequestUnlock, + ); let sender_account = T::AccountIdConverter::convert_location(&owner).unwrap(); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); @@ -774,15 +912,18 @@ benchmarks! { } let instruction = Instruction::RequestUnlock { asset, locker }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } // Check we charged the delivery fees assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); // TODO: Potentially add new trait to XcmSender to detect a queued outgoing message. #4426 + Ok(()) } - unpaid_execution { + #[benchmark] + fn unpaid_execution() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); executor.set_origin(Some(Here.into())); @@ -792,21 +933,27 @@ benchmarks! { }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; + #[block] + { + executor.bench_process(xcm)?; + } + Ok(()) } - alias_origin { + #[benchmark] + fn alias_origin() -> Result<(), BenchmarkError> { let (origin, target) = T::alias_origin().map_err(|_| BenchmarkError::Skip)?; let mut executor = new_executor::(origin); let instruction = Instruction::AliasOrigin(target.clone()); let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.origin(), &Some(target)); + Ok(()) } impl_benchmark_test_suite!( diff --git a/polkadot/xcm/pallet-xcm/Cargo.toml b/polkadot/xcm/pallet-xcm/Cargo.toml index 4d44d75e34dd..81fcea05cac2 100644 --- a/polkadot/xcm/pallet-xcm/Cargo.toml +++ b/polkadot/xcm/pallet-xcm/Cargo.toml @@ -5,6 +5,8 @@ description = "A pallet for handling XCM programs." 
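Note on the migration pattern applied throughout this benchmarking file: every hunk converts a legacy `benchmarks! { .. }` item into `frame_benchmarking::v2` attribute syntax, where setup runs before a `#[block]`, only the block is measured, and the old `verify { .. }` clause becomes plain assertions before `Ok(())`. A hand-assembled sketch of the target shape, using the `clear_topic` case from this file (generics such as `new_executor::<T>` restored by hand; the enclosing module is assumed to carry the v2 `#[benchmarks]` attribute):

```rust
use frame_benchmarking::v2::*;

#[benchmarks]
mod benchmarks {
    use super::*;

    #[benchmark]
    fn clear_topic() -> Result<(), BenchmarkError> {
        // Setup runs outside the measured section.
        let mut executor = new_executor::<T>(Default::default());
        executor.set_topic(Some([2; 32]));
        let xcm = Xcm(vec![Instruction::ClearTopic]);

        // Only the `#[block]` body is timed; it replaces the old `}: { .. }` section.
        #[block]
        {
            executor.bench_process(xcm)?;
        }

        // Post-conditions replace the old `verify { .. }` clause.
        assert_eq!(executor.topic(), &None);
        Ok(())
    }
}
```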
authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -68,6 +70,7 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", diff --git a/polkadot/xcm/pallet-xcm/src/benchmarking.rs b/polkadot/xcm/pallet-xcm/src/benchmarking.rs index e493d4838f5c..dd3c58c5dc77 100644 --- a/polkadot/xcm/pallet-xcm/src/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm/src/benchmarking.rs @@ -96,6 +96,13 @@ benchmarks! { )? .into(); let versioned_msg = VersionedXcm::from(msg); + + // Ensure that origin can send to destination (e.g. setup delivery fees, ensure router setup, ...) + T::DeliveryHelper::ensure_successful_delivery( + &Default::default(), + &versioned_dest.clone().try_into().unwrap(), + FeeReason::ChargeFees, + ); }: _>(send_origin, Box::new(versioned_dest), Box::new(versioned_msg)) teleport_assets { @@ -164,7 +171,7 @@ benchmarks! { } // Ensure that origin can send to destination (e.g. setup delivery fees, ensure router setup, ...) - let (_, _) = T::DeliveryHelper::ensure_successful_delivery( + T::DeliveryHelper::ensure_successful_delivery( &origin_location, &destination, FeeReason::ChargeFees, @@ -227,6 +234,13 @@ benchmarks! { let versioned_beneficiary: VersionedLocation = AccountId32 { network: None, id: recipient.into() }.into(); let versioned_assets: VersionedAssets = assets.into(); + + // Ensure that origin can send to destination (e.g. setup delivery fees, ensure router setup, ...) + T::DeliveryHelper::ensure_successful_delivery( + &Default::default(), + &versioned_dest.clone().try_into().unwrap(), + FeeReason::ChargeFees, + ); }: _>(send_origin.into(), Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0, WeightLimit::Unlimited) verify { // run provided verification function @@ -259,6 +273,14 @@ benchmarks! { BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), )? .into(); + + // Ensure that origin can send to destination (e.g. setup delivery fees, ensure router setup, ...) + T::DeliveryHelper::ensure_successful_delivery( + &Default::default(), + &versioned_loc.clone().try_into().unwrap(), + FeeReason::ChargeFees, + ); + }: _(RawOrigin::Root, Box::new(versioned_loc)) force_unsubscribe_version_notify { @@ -266,6 +288,14 @@ benchmarks! { BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), )?; let versioned_loc: VersionedLocation = loc.clone().into(); + + // Ensure that origin can send to destination (e.g. setup delivery fees, ensure router setup, ...) 
+ T::DeliveryHelper::ensure_successful_delivery( + &Default::default(), + &versioned_loc.clone().try_into().unwrap(), + FeeReason::ChargeFees, + ); + let _ = crate::Pallet::::request_version_notify(loc); }: _(RawOrigin::Root, Box::new(versioned_loc)) diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs index 5e0512c6a9fd..6360298b21c3 100644 --- a/polkadot/xcm/pallet-xcm/src/lib.rs +++ b/polkadot/xcm/pallet-xcm/src/lib.rs @@ -363,7 +363,10 @@ pub mod pallet { let message: Xcm<()> = (*message).try_into().map_err(|()| Error::::BadVersion)?; let message_id = Self::send_xcm(interior, dest.clone(), message.clone()) - .map_err(Error::::from)?; + .map_err(|error| { + tracing::error!(target: "xcm::pallet_xcm::send", ?error, ?dest, ?message, "XCM send failed with error"); + Error::::from(error) + })?; let e = Event::Sent { origin: origin_location, destination: dest, message, message_id }; Self::deposit_event(e); Ok(message_id) @@ -1800,7 +1803,10 @@ impl Pallet { if let Some(remote_xcm) = remote_xcm { let (ticket, price) = validate_send::(dest.clone(), remote_xcm.clone()) - .map_err(Error::::from)?; + .map_err(|error| { + tracing::error!(target: "xcm::pallet_xcm::execute_xcm_transfer", ?error, ?dest, ?remote_xcm, "XCM validate_send failed with error"); + Error::::from(error) + })?; if origin != Here.into_location() { Self::charge_fees(origin.clone(), price.clone()).map_err(|error| { tracing::error!( @@ -1810,7 +1816,11 @@ impl Pallet { Error::::FeesNotMet })?; } - let message_id = T::XcmRouter::deliver(ticket).map_err(Error::::from)?; + let message_id = T::XcmRouter::deliver(ticket) + .map_err(|error| { + tracing::error!(target: "xcm::pallet_xcm::execute_xcm_transfer", ?error, ?dest, ?remote_xcm, "XCM deliver failed with error"); + Error::::from(error) + })?; let e = Event::Sent { origin, destination: dest, message: remote_xcm, message_id }; Self::deposit_event(e); diff --git a/polkadot/xcm/procedural/Cargo.toml b/polkadot/xcm/procedural/Cargo.toml index 83b35d19cf7e..88ed3c94ddf4 100644 --- a/polkadot/xcm/procedural/Cargo.toml +++ b/polkadot/xcm/procedural/Cargo.toml @@ -6,6 +6,8 @@ edition.workspace = true license.workspace = true version = "7.0.0" publish = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -24,3 +26,5 @@ trybuild = { features = ["diff"], workspace = true } # NOTE: we have to explicitly specify `std` because of trybuild # https://github.com/paritytech/polkadot-sdk/pull/5167 xcm = { workspace = true, default-features = true, features = ["std"] } +# For testing macros. 
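The pallet-xcm hunks above apply one logging pattern at three call sites (`send`, `validate_send` and `XcmRouter::deliver`): the error is logged with structured context before being converted into a pallet error. Reassembled as plain code for readability (`interior`, `dest` and `message` are locals of the surrounding function; the stripped turbofish on `Error::<T>::from` is restored by hand):

```rust
let message_id = Self::send_xcm(interior, dest.clone(), message.clone())
    .map_err(|error| {
        // Log the failure with full context before mapping it, so routing
        // problems show up in node logs instead of only as an opaque error.
        tracing::error!(
            target: "xcm::pallet_xcm::send",
            ?error, ?dest, ?message,
            "XCM send failed with error"
        );
        Error::<T>::from(error)
    })?;
```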
+frame-support = { workspace = true } diff --git a/polkadot/xcm/procedural/src/builder_pattern.rs b/polkadot/xcm/procedural/src/builder_pattern.rs index b65290332af9..34b89f13422c 100644 --- a/polkadot/xcm/procedural/src/builder_pattern.rs +++ b/polkadot/xcm/procedural/src/builder_pattern.rs @@ -20,8 +20,8 @@ use inflector::Inflector; use proc_macro2::TokenStream as TokenStream2; use quote::{format_ident, quote}; use syn::{ - Data, DataEnum, DeriveInput, Error, Expr, ExprLit, Fields, Ident, Lit, Meta, MetaNameValue, - Result, Variant, + Data, DataEnum, DeriveInput, Error, Expr, ExprLit, Fields, GenericArgument, Ident, Lit, Meta, + MetaNameValue, PathArguments, Result, Type, TypePath, Variant, }; pub fn derive(input: DeriveInput) -> Result { @@ -29,7 +29,7 @@ pub fn derive(input: DeriveInput) -> Result { Data::Enum(data_enum) => data_enum, _ => return Err(Error::new_spanned(&input, "Expected the `Instruction` enum")), }; - let builder_raw_impl = generate_builder_raw_impl(&input.ident, data_enum); + let builder_raw_impl = generate_builder_raw_impl(&input.ident, data_enum)?; let builder_impl = generate_builder_impl(&input.ident, data_enum)?; let builder_unpaid_impl = generate_builder_unpaid_impl(&input.ident, data_enum)?; let output = quote! { @@ -83,54 +83,12 @@ pub fn derive(input: DeriveInput) -> Result { Ok(output) } -fn generate_builder_raw_impl(name: &Ident, data_enum: &DataEnum) -> TokenStream2 { - let methods = data_enum.variants.iter().map(|variant| { - let variant_name = &variant.ident; - let method_name_string = &variant_name.to_string().to_snake_case(); - let method_name = syn::Ident::new(method_name_string, variant_name.span()); - let docs = get_doc_comments(variant); - let method = match &variant.fields { - Fields::Unit => { - quote! { - pub fn #method_name(mut self) -> Self { - self.instructions.push(#name::::#variant_name); - self - } - } - }, - Fields::Unnamed(fields) => { - let arg_names: Vec<_> = fields - .unnamed - .iter() - .enumerate() - .map(|(index, _)| format_ident!("arg{}", index)) - .collect(); - let arg_types: Vec<_> = fields.unnamed.iter().map(|field| &field.ty).collect(); - quote! { - pub fn #method_name(mut self, #(#arg_names: impl Into<#arg_types>),*) -> Self { - #(let #arg_names = #arg_names.into();)* - self.instructions.push(#name::::#variant_name(#(#arg_names),*)); - self - } - } - }, - Fields::Named(fields) => { - let arg_names: Vec<_> = fields.named.iter().map(|field| &field.ident).collect(); - let arg_types: Vec<_> = fields.named.iter().map(|field| &field.ty).collect(); - quote! { - pub fn #method_name(mut self, #(#arg_names: impl Into<#arg_types>),*) -> Self { - #(let #arg_names = #arg_names.into();)* - self.instructions.push(#name::::#variant_name { #(#arg_names),* }); - self - } - } - }, - }; - quote! { - #(#docs)* - #method - } - }); +fn generate_builder_raw_impl(name: &Ident, data_enum: &DataEnum) -> Result { + let methods = data_enum + .variants + .iter() + .map(|variant| convert_variant_to_method(name, variant, None)) + .collect::>>()?; let output = quote! 
{ impl XcmBuilder { #(#methods)* @@ -140,7 +98,7 @@ fn generate_builder_raw_impl(name: &Ident, data_enum: &DataEnum) -> TokenStream2 } } }; - output + Ok(output) } fn generate_builder_impl(name: &Ident, data_enum: &DataEnum) -> Result { @@ -165,11 +123,17 @@ fn generate_builder_impl(name: &Ident, data_enum: &DataEnum) -> Result>>()?; @@ -178,57 +142,14 @@ fn generate_builder_impl(name: &Ident, data_enum: &DataEnum) -> Result { - let arg_names: Vec<_> = fields - .unnamed - .iter() - .enumerate() - .map(|(index, _)| format_ident!("arg{}", index)) - .collect(); - let arg_types: Vec<_> = fields.unnamed.iter().map(|field| &field.ty).collect(); - quote! { - #(#docs)* - pub fn #method_name(self, #(#arg_names: impl Into<#arg_types>),*) -> XcmBuilder { - let mut new_instructions = self.instructions; - #(let #arg_names = #arg_names.into();)* - new_instructions.push(#name::::#variant_name(#(#arg_names),*)); - XcmBuilder { - instructions: new_instructions, - state: core::marker::PhantomData, - } - } - } - }, - Fields::Named(fields) => { - let arg_names: Vec<_> = fields.named.iter().map(|field| &field.ident).collect(); - let arg_types: Vec<_> = fields.named.iter().map(|field| &field.ty).collect(); - quote! { - #(#docs)* - pub fn #method_name(self, #(#arg_names: impl Into<#arg_types>),*) -> XcmBuilder { - let mut new_instructions = self.instructions; - #(let #arg_names = #arg_names.into();)* - new_instructions.push(#name::::#variant_name { #(#arg_names),* }); - XcmBuilder { - instructions: new_instructions, - state: core::marker::PhantomData, - } - } - } - }, - _ => - return Err(Error::new_spanned( - variant, - "Instructions that load the holding register should take operands", - )), - }; + let method = convert_variant_to_method( + name, + variant, + Some(quote! { XcmBuilder }), + )?; Ok(method) }) - .collect::, _>>()?; + .collect::>>()?; let first_impl = quote! { impl XcmBuilder { @@ -240,27 +161,12 @@ fn generate_builder_impl(name: &Ident, data_enum: &DataEnum) -> Result = data_enum .variants .iter() - .filter(|variant| variant.ident == "ClearOrigin") + .filter(|variant| variant.ident == "ClearOrigin" || variant.ident == "SetHints") .map(|variant| { - let variant_name = &variant.ident; - let method_name_string = &variant_name.to_string().to_snake_case(); - let method_name = syn::Ident::new(method_name_string, variant_name.span()); - let docs = get_doc_comments(variant); - let method = match &variant.fields { - Fields::Unit => { - quote! { - #(#docs)* - pub fn #method_name(mut self) -> XcmBuilder { - self.instructions.push(#name::::#variant_name); - self - } - } - }, - _ => return Err(Error::new_spanned(variant, "ClearOrigin should have no fields")), - }; + let method = convert_variant_to_method(name, variant, None)?; Ok(method) }) - .collect::, _>>()?; + .collect::>>()?; // Then we require fees to be paid let pay_fees_variants = data_enum @@ -295,36 +201,12 @@ fn generate_builder_impl(name: &Ident, data_enum: &DataEnum) -> Result { - let arg_names: Vec<_> = - fields.named.iter().map(|field| &field.ident).collect(); - let arg_types: Vec<_> = - fields.named.iter().map(|field| &field.ty).collect(); - quote! 
{ - #(#docs)* - pub fn #method_name(self, #(#arg_names: impl Into<#arg_types>),*) -> XcmBuilder { - let mut new_instructions = self.instructions; - #(let #arg_names = #arg_names.into();)* - new_instructions.push(#name::::#variant_name { #(#arg_names),* }); - XcmBuilder { - instructions: new_instructions, - state: core::marker::PhantomData, - } - } - } - }, - _ => - return Err(Error::new_spanned( - variant, - "Both BuyExecution and PayFees have named fields", - )), - }; - Ok(fields) + let method = convert_variant_to_method( + name, + variant, + Some(quote! { XcmBuilder }), + )?; + Ok(method) }) .collect::>>()?; @@ -349,35 +231,156 @@ fn generate_builder_unpaid_impl(name: &Ident, data_enum: &DataEnum) -> Result fields, - _ => - return Err(Error::new_spanned( - unpaid_execution_variant, - "UnpaidExecution should have named fields", - )), - }; - let arg_names: Vec<_> = fields.named.iter().map(|field| &field.ident).collect(); - let arg_types: Vec<_> = fields.named.iter().map(|field| &field.ty).collect(); + let method = convert_variant_to_method( + name, + &unpaid_execution_variant, + Some(quote! { XcmBuilder }), + )?; Ok(quote! { impl XcmBuilder { - #(#docs)* - pub fn #unpaid_execution_method_name(self, #(#arg_names: impl Into<#arg_types>),*) -> XcmBuilder { - let mut new_instructions = self.instructions; - #(let #arg_names = #arg_names.into();)* - new_instructions.push(#name::::#unpaid_execution_ident { #(#arg_names),* }); - XcmBuilder { - instructions: new_instructions, - state: core::marker::PhantomData, + #method + } + }) +} + +// Have to call with `XcmBuilder` in allowed_after_load_holding_methods. +fn convert_variant_to_method( + name: &Ident, + variant: &Variant, + maybe_return_type: Option, +) -> Result { + let variant_name = &variant.ident; + let method_name_string = &variant_name.to_string().to_snake_case(); + let method_name = syn::Ident::new(method_name_string, variant_name.span()); + let docs = get_doc_comments(variant); + let method = match &variant.fields { + Fields::Unit => + if let Some(return_type) = maybe_return_type { + quote! { + pub fn #method_name(self) -> #return_type { + let mut new_instructions = self.instructions; + new_instructions.push(#name::::#variant_name); + XcmBuilder { + instructions: new_instructions, + state: core::marker::PhantomData, + } + } + } + } else { + quote! { + pub fn #method_name(mut self) -> Self { + self.instructions.push(#name::::#variant_name); + self + } + } + }, + Fields::Unnamed(fields) => { + let arg_names: Vec<_> = fields + .unnamed + .iter() + .enumerate() + .map(|(index, _)| format_ident!("arg{}", index)) + .collect(); + let arg_types: Vec<_> = fields.unnamed.iter().map(|field| &field.ty).collect(); + if let Some(return_type) = maybe_return_type { + quote! { + pub fn #method_name(self, #(#arg_names: impl Into<#arg_types>),*) -> #return_type { + let mut new_instructions = self.instructions; + #(let #arg_names = #arg_names.into();)* + new_instructions.push(#name::::#variant_name(#(#arg_names),*)); + XcmBuilder { + instructions: new_instructions, + state: core::marker::PhantomData, + } + } + } + } else { + quote! { + pub fn #method_name(mut self, #(#arg_names: impl Into<#arg_types>),*) -> Self { + #(let #arg_names = #arg_names.into();)* + self.instructions.push(#name::::#variant_name(#(#arg_names),*)); + self + } } } - } + }, + Fields::Named(fields) => { + let normal_fields: Vec<_> = fields + .named + .iter() + .filter(|field| { + if let Type::Path(TypePath { path, .. 
}) = &field.ty { + for segment in &path.segments { + if segment.ident == format_ident!("BoundedVec") { + return false; + } + } + true + } else { + true + } + }) + .collect(); + let bounded_fields: Vec<_> = fields + .named + .iter() + .filter(|field| { + if let Type::Path(TypePath { path, .. }) = &field.ty { + for segment in &path.segments { + if segment.ident == format_ident!("BoundedVec") { + return true; + } + } + false + } else { + false + } + }) + .collect(); + let arg_names: Vec<_> = normal_fields.iter().map(|field| &field.ident).collect(); + let arg_types: Vec<_> = normal_fields.iter().map(|field| &field.ty).collect(); + let bounded_names: Vec<_> = bounded_fields.iter().map(|field| &field.ident).collect(); + let bounded_types = bounded_fields + .iter() + .map(|field| extract_generic_argument(&field.ty, 0, "BoundedVec's inner type")) + .collect::>>()?; + let bounded_sizes = bounded_fields + .iter() + .map(|field| extract_generic_argument(&field.ty, 1, "BoundedVec's size")) + .collect::>>()?; + let comma_in_the_middle = if normal_fields.is_empty() { + quote! {} + } else { + quote! {,} + }; + if let Some(return_type) = maybe_return_type { + quote! { + pub fn #method_name(self, #(#arg_names: impl Into<#arg_types>),* #comma_in_the_middle #(#bounded_names: Vec<#bounded_types>),*) -> #return_type { + let mut new_instructions = self.instructions; + #(let #arg_names = #arg_names.into();)* + #(let #bounded_names = BoundedVec::<#bounded_types, #bounded_sizes>::truncate_from(#bounded_names);)* + new_instructions.push(#name::::#variant_name { #(#arg_names),* #comma_in_the_middle #(#bounded_names),* }); + XcmBuilder { + instructions: new_instructions, + state: core::marker::PhantomData, + } + } + } + } else { + quote! { + pub fn #method_name(mut self, #(#arg_names: impl Into<#arg_types>),* #comma_in_the_middle #(#bounded_names: Vec<#bounded_types>),*) -> Self { + #(let #arg_names = #arg_names.into();)* + #(let #bounded_names = BoundedVec::<#bounded_types, #bounded_sizes>::truncate_from(#bounded_names);)* + self.instructions.push(#name::::#variant_name { #(#arg_names),* #comma_in_the_middle #(#bounded_names),* }); + self + } + } + } + }, + }; + Ok(quote! { + #(#docs)* + #method }) } @@ -395,3 +398,40 @@ fn get_doc_comments(variant: &Variant) -> Vec { .map(|doc| syn::parse_str::(&format!("/// {}", doc)).unwrap()) .collect() } + +fn extract_generic_argument<'a>( + field_ty: &'a Type, + index: usize, + expected_msg: &str, +) -> Result<&'a Ident> { + if let Type::Path(type_path) = field_ty { + if let Some(segment) = type_path.path.segments.last() { + if let PathArguments::AngleBracketed(angle_brackets) = &segment.arguments { + let args: Vec<_> = angle_brackets.args.iter().collect(); + if let Some(GenericArgument::Type(Type::Path(TypePath { path, .. 
}))) = + args.get(index) + { + return path.get_ident().ok_or_else(|| { + Error::new_spanned( + path, + format!("Expected an identifier for {}", expected_msg), + ) + }); + } + return Err(Error::new_spanned( + angle_brackets, + format!("Expected a generic argument at index {} for {}", index, expected_msg), + )); + } + return Err(Error::new_spanned( + &segment.arguments, + format!("Expected angle-bracketed arguments for {}", expected_msg), + )); + } + return Err(Error::new_spanned( + &type_path.path, + format!("Expected at least one path segment for {}", expected_msg), + )); + } + Err(Error::new_spanned(field_ty, format!("Expected a path type for {}", expected_msg))) +} diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.rs b/polkadot/xcm/procedural/src/enum_variants.rs similarity index 51% rename from polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.rs rename to polkadot/xcm/procedural/src/enum_variants.rs index 070f0be6bacc..f9f2d9e15675 100644 --- a/polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.rs +++ b/polkadot/xcm/procedural/src/enum_variants.rs @@ -14,19 +14,25 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Test error when an instruction that loads the holding register doesn't take operands. - -use xcm_procedural::Builder; - -struct Xcm(pub Vec>); - -#[derive(Builder)] -enum Instruction { - #[builder(loads_holding)] - WithdrawAsset, - BuyExecution { fees: u128 }, - UnpaidExecution { weight_limit: (u32, u32) }, - Transact { call: Call }, +//! Simple derive macro for getting the number of variants in an enum. + +use proc_macro2::TokenStream as TokenStream2; +use quote::{format_ident, quote}; +use syn::{Data, DeriveInput, Error, Result}; + +pub fn derive(input: DeriveInput) -> Result { + let data_enum = match &input.data { + Data::Enum(data_enum) => data_enum, + _ => return Err(Error::new_spanned(&input, "Expected an enum.")), + }; + let ident = format_ident!("{}NumVariants", input.ident); + let number_of_variants: usize = data_enum.variants.iter().count(); + Ok(quote! { + pub struct #ident; + impl ::frame_support::traits::Get for #ident { + fn get() -> u32 { + #number_of_variants as u32 + } + } + }) } - -fn main() {} diff --git a/polkadot/xcm/procedural/src/lib.rs b/polkadot/xcm/procedural/src/lib.rs index 9971fdceb69a..0dd270286f69 100644 --- a/polkadot/xcm/procedural/src/lib.rs +++ b/polkadot/xcm/procedural/src/lib.rs @@ -20,6 +20,7 @@ use proc_macro::TokenStream; use syn::{parse_macro_input, DeriveInput}; mod builder_pattern; +mod enum_variants; mod v3; mod v4; mod v5; @@ -86,3 +87,11 @@ pub fn derive_builder(input: TokenStream) -> TokenStream { .unwrap_or_else(syn::Error::into_compile_error) .into() } + +#[proc_macro_derive(NumVariants)] +pub fn derive_num_variants(input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as DeriveInput); + enum_variants::derive(input) + .unwrap_or_else(syn::Error::into_compile_error) + .into() +} diff --git a/polkadot/xcm/procedural/tests/builder_pattern.rs b/polkadot/xcm/procedural/tests/builder_pattern.rs index 4202309bf3f7..3915621916d4 100644 --- a/polkadot/xcm/procedural/tests/builder_pattern.rs +++ b/polkadot/xcm/procedural/tests/builder_pattern.rs @@ -17,6 +17,7 @@ //! Test the methods generated by the Builder derive macro. //! Tests directly on the actual Xcm struct and Instruction enum. 
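The `BoundedVec` special-casing above changes the generated builder API: for a variant field typed `BoundedVec<Ty, Bound>`, the emitted method accepts a plain `Vec<Ty>` and truncates it to the bound. A hand-expanded sketch of what the `quote!` template produces for `SetHints` in the `Self`-returning flavour (an expansion excerpt, not standalone code; generics restored by hand):

```rust
pub fn set_hints(mut self, hints: Vec<Hint>) -> Self {
    // Oversized input is silently truncated to the bound, not rejected.
    let hints = BoundedVec::<Hint, HintNumVariants>::truncate_from(hints);
    self.instructions.push(Instruction::<Call>::SetHints { hints });
    self
}
```

The builder tests that follow exercise exactly this truncation behaviour.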
+use frame_support::BoundedVec; use xcm::latest::prelude::*; #[test] @@ -100,3 +101,61 @@ fn default_builder_allows_clear_origin_before_buy_execution() { ]) ); } + +#[test] +fn bounded_vecs_use_vecs_and_truncate_them() { + let claimer = Location::parent(); + // We can use a vec instead of a bounded vec for specifying hints. + let xcm: Xcm<()> = Xcm::builder_unsafe() + .set_hints(vec![AssetClaimer { location: claimer.clone() }]) + .build(); + assert_eq!( + xcm, + Xcm(vec![SetHints { + hints: BoundedVec::::truncate_from(vec![AssetClaimer { + location: Location::parent() + },]), + },]) + ); + + // If we include more than the limit they'll get truncated. + let xcm: Xcm<()> = Xcm::builder_unsafe() + .set_hints(vec![ + AssetClaimer { location: claimer.clone() }, + AssetClaimer { location: Location::here() }, + ]) + .build(); + assert_eq!( + xcm, + Xcm(vec![SetHints { + hints: BoundedVec::::truncate_from(vec![AssetClaimer { + location: Location::parent() + },]), + },]) + ); + + let xcm: Xcm<()> = Xcm::builder() + .withdraw_asset((Here, 100u128)) + .set_hints(vec![AssetClaimer { location: claimer }]) + .clear_origin() + .pay_fees((Here, 10u128)) + .deposit_asset(All, [0u8; 32]) + .build(); + assert_eq!( + xcm, + Xcm(vec![ + WithdrawAsset(Asset { id: AssetId(Location::here()), fun: Fungible(100) }.into()), + SetHints { + hints: BoundedVec::::truncate_from(vec![AssetClaimer { + location: Location::parent() + }]) + }, + ClearOrigin, + PayFees { asset: Asset { id: AssetId(Location::here()), fun: Fungible(10) } }, + DepositAsset { + assets: All.into(), + beneficiary: AccountId32 { id: [0u8; 32], network: None }.into() + }, + ]) + ); +} diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.rs b/polkadot/xcm/procedural/tests/enum_variants.rs similarity index 70% rename from polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.rs rename to polkadot/xcm/procedural/tests/enum_variants.rs index bb98d603fd91..4a5362c1579a 100644 --- a/polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.rs +++ b/polkadot/xcm/procedural/tests/enum_variants.rs @@ -14,17 +14,20 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Test error when the `BuyExecution` instruction doesn't take named fields. +//! Test the struct generated by the `NumVariants` derive macro. 
-use xcm_procedural::Builder; +use frame_support::traits::Get; +use xcm_procedural::NumVariants; -struct Xcm(pub Vec>); - -#[derive(Builder)] -enum Instruction { - BuyExecution { fees: u128 }, - UnpaidExecution(u32, u32), - Transact { call: Call }, +#[allow(dead_code)] +#[derive(NumVariants)] +enum SomeEnum { + Variant1, + Variant2, + Variant3, } -fn main() {} +#[test] +fn num_variants_works() { + assert_eq!(SomeEnumNumVariants::get(), 3); +} diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.stderr b/polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.stderr deleted file mode 100644 index 0358a35ad3dd..000000000000 --- a/polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.stderr +++ /dev/null @@ -1,6 +0,0 @@ -error: Instructions that load the holding register should take operands - --> tests/ui/builder_pattern/loads_holding_no_operands.rs:25:5 - | -25 | / #[builder(loads_holding)] -26 | | WithdrawAsset, - | |_________________^ diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/unexpected_attribute.stderr b/polkadot/xcm/procedural/tests/ui/builder_pattern/unexpected_attribute.stderr new file mode 100644 index 000000000000..c4d711e0d455 --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/unexpected_attribute.stderr @@ -0,0 +1,5 @@ +error: Expected `builder(loads_holding)` or `builder(pays_fees)` + --> tests/ui/builder_pattern/unexpected_attribute.rs:25:5 + | +25 | #[builder(funds_holding)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.stderr b/polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.stderr deleted file mode 100644 index 0a3c0a40a33b..000000000000 --- a/polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.stderr +++ /dev/null @@ -1,5 +0,0 @@ -error: UnpaidExecution should have named fields - --> tests/ui/builder_pattern/unpaid_execution_named_fields.rs:26:5 - | -26 | UnpaidExecution(u32, u32), - | ^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/polkadot/xcm/src/v3/traits.rs b/polkadot/xcm/src/v3/traits.rs index 1c8620708922..cbf85b454cc6 100644 --- a/polkadot/xcm/src/v3/traits.rs +++ b/polkadot/xcm/src/v3/traits.rs @@ -547,13 +547,13 @@ impl SendXcm for Tuple { } /// Convenience function for using a `SendXcm` implementation. Just interprets the `dest` and wraps -/// both in `Some` before passing them as as mutable references into `T::send_xcm`. +/// both in `Some` before passing them as mutable references into `T::send_xcm`. pub fn validate_send(dest: MultiLocation, msg: Xcm<()>) -> SendResult { T::validate(&mut Some(dest), &mut Some(msg)) } /// Convenience function for using a `SendXcm` implementation. Just interprets the `dest` and wraps -/// both in `Some` before passing them as as mutable references into `T::send_xcm`. +/// both in `Some` before passing them as mutable references into `T::send_xcm`. /// /// Returns either `Ok` with the price of the delivery, or `Err` with the reason why the message /// could not be sent. 
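Tying the new derive to its consumer: the sketch below shows `NumVariants` in isolation, using the same names the v5 changes introduce further down (unit variant used for brevity; the real `Hint` carries a `location` field):

```rust
use frame_support::traits::Get;
use xcm_procedural::NumVariants;

#[allow(dead_code)]
#[derive(NumVariants)]
enum Hint {
    AssetClaimer,
}

// `NumVariants` emits `pub struct HintNumVariants` with an `impl Get<u32>`
// returning the variant count, which v5 uses as the length bound of the
// `SetHints { hints: BoundedVec<Hint, HintNumVariants> }` field.
fn main() {
    assert_eq!(HintNumVariants::get(), 1);
}
```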
diff --git a/polkadot/xcm/src/v4/mod.rs b/polkadot/xcm/src/v4/mod.rs index 9baf58eacfb0..a0ce551b7608 100644 --- a/polkadot/xcm/src/v4/mod.rs +++ b/polkadot/xcm/src/v4/mod.rs @@ -1314,8 +1314,22 @@ impl TryFrom> for Instructi HrmpChannelAccepted { recipient } => Self::HrmpChannelAccepted { recipient }, HrmpChannelClosing { initiator, sender, recipient } => Self::HrmpChannelClosing { initiator, sender, recipient }, - Transact { origin_kind, mut call } => { - let require_weight_at_most = call.take_decoded()?.get_dispatch_info().call_weight; + Transact { origin_kind, mut call, fallback_max_weight } => { + // We first try to decode the call, if we can't, we use the fallback weight, + // if there's no fallback, we just return `Weight::MAX`. + let require_weight_at_most = match call.take_decoded() { + Ok(decoded) => decoded.get_dispatch_info().call_weight, + Err(error) => { + let fallback_weight = fallback_max_weight.unwrap_or(Weight::MAX); + log::debug!( + target: "xcm::versions::v5Tov4", + "Couldn't decode call in Transact: {:?}, using fallback weight: {:?}", + error, + fallback_weight, + ); + fallback_weight + }, + }; Self::Transact { origin_kind, require_weight_at_most, call: call.into() } }, ReportError(response_info) => Self::ReportError(QueryResponseInfo { @@ -1423,7 +1437,7 @@ impl TryFrom> for Instructi }, InitiateTransfer { .. } | PayFees { .. } | - SetAssetClaimer { .. } | + SetHints { .. } | ExecuteWithOrigin { .. } => { log::debug!(target: "xcm::versions::v5tov4", "`{new_instruction:?}` not supported by v4"); return Err(()); diff --git a/polkadot/xcm/src/v4/traits.rs b/polkadot/xcm/src/v4/traits.rs index f32b26fb163d..178093d27177 100644 --- a/polkadot/xcm/src/v4/traits.rs +++ b/polkadot/xcm/src/v4/traits.rs @@ -289,13 +289,13 @@ impl SendXcm for Tuple { } /// Convenience function for using a `SendXcm` implementation. Just interprets the `dest` and wraps -/// both in `Some` before passing them as as mutable references into `T::send_xcm`. +/// both in `Some` before passing them as mutable references into `T::send_xcm`. pub fn validate_send(dest: Location, msg: Xcm<()>) -> SendResult { T::validate(&mut Some(dest), &mut Some(msg)) } /// Convenience function for using a `SendXcm` implementation. Just interprets the `dest` and wraps -/// both in `Some` before passing them as as mutable references into `T::send_xcm`. +/// both in `Some` before passing them as mutable references into `T::send_xcm`. /// /// Returns either `Ok` with the price of the delivery, or `Err` with the reason why the message /// could not be sent. diff --git a/polkadot/xcm/src/v5/junction.rs b/polkadot/xcm/src/v5/junction.rs index 952b61cd9ffe..d86a762fcf44 100644 --- a/polkadot/xcm/src/v5/junction.rs +++ b/polkadot/xcm/src/v5/junction.rs @@ -143,16 +143,20 @@ pub enum NetworkId { /// The Kusama canary-net Relay-chain. Kusama, /// An Ethereum network specified by its chain ID. + #[codec(index = 7)] Ethereum { /// The EIP-155 chain ID. #[codec(compact)] chain_id: u64, }, /// The Bitcoin network, including hard-forks supported by Bitcoin Core development team. + #[codec(index = 8)] BitcoinCore, /// The Bitcoin network, including hard-forks supported by Bitcoin Cash developers. + #[codec(index = 9)] BitcoinCash, /// The Polkadot Bulletin chain. 
+ #[codec(index = 10)] PolkadotBulletin, } diff --git a/polkadot/xcm/src/v5/mod.rs b/polkadot/xcm/src/v5/mod.rs index 830b23cc44b7..21845d07529e 100644 --- a/polkadot/xcm/src/v5/mod.rs +++ b/polkadot/xcm/src/v5/mod.rs @@ -196,6 +196,8 @@ pub mod prelude { AssetInstance::{self, *}, Assets, BodyId, BodyPart, Error as XcmError, ExecuteXcm, Fungibility::{self, *}, + Hint::{self, *}, + HintNumVariants, Instruction::*, InteriorLocation, Junction::{self, *}, @@ -493,13 +495,21 @@ pub enum Instruction { /// /// - `origin_kind`: The means of expressing the message origin as a dispatch origin. /// - `call`: The encoded transaction to be applied. + /// - `fallback_max_weight`: Used for compatibility with previous versions. Corresponds to the + /// `require_weight_at_most` parameter in previous versions. If you don't care about + /// compatibility you can just put `None`. WARNING: If you do, your XCM might not work with + /// older versions. Make sure to dry-run and validate. /// /// Safety: No concerns. /// /// Kind: *Command*. /// /// Errors: - Transact { origin_kind: OriginKind, call: DoubleEncoded }, + Transact { + origin_kind: OriginKind, + fallback_max_weight: Option, + call: DoubleEncoded, + }, /// A message to notify about a new incoming HRMP channel. This message is meant to be sent by /// the relay-chain to a para. @@ -739,15 +749,6 @@ pub enum Instruction { /// Errors: None. ClearError, - /// Set asset claimer for all the trapped assets during the execution. - /// - /// - `location`: The claimer of any assets potentially trapped during the execution of current - /// XCM. It can be an arbitrary location, not necessarily the caller or origin. - /// - /// Kind: *Command* - /// - /// Errors: None. - SetAssetClaimer { location: Location }, /// Create some assets which are being held on behalf of the origin. /// /// - `assets`: The assets which are to be claimed. This must match exactly with the assets @@ -1128,6 +1129,25 @@ pub enum Instruction { /// Errors: /// - `BadOrigin` ExecuteWithOrigin { descendant_origin: Option, xcm: Xcm }, + + /// Set hints for XCM execution. + /// + /// These hints change the behaviour of the XCM program they are present in. + /// + /// Parameters: + /// + /// - `hints`: A bounded vector of `ExecutionHint`, specifying the different hints that will + /// be activated. + SetHints { hints: BoundedVec }, +} + +#[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Eq, Clone, xcm_procedural::NumVariants)] +pub enum Hint { + /// Set asset claimer for all the trapped assets during the execution. + /// + /// - `location`: The claimer of any assets potentially trapped during the execution of current + /// XCM. It can be an arbitrary location, not necessarily the caller or origin. 
+ AssetClaimer { location: Location }, } impl Xcm { @@ -1159,7 +1179,8 @@ impl Instruction { HrmpChannelAccepted { recipient } => HrmpChannelAccepted { recipient }, HrmpChannelClosing { initiator, sender, recipient } => HrmpChannelClosing { initiator, sender, recipient }, - Transact { origin_kind, call } => Transact { origin_kind, call: call.into() }, + Transact { origin_kind, call, fallback_max_weight } => + Transact { origin_kind, call: call.into(), fallback_max_weight }, ReportError(response_info) => ReportError(response_info), DepositAsset { assets, beneficiary } => DepositAsset { assets, beneficiary }, DepositReserveAsset { assets, dest, xcm } => DepositReserveAsset { assets, dest, xcm }, @@ -1175,7 +1196,7 @@ impl Instruction { SetErrorHandler(xcm) => SetErrorHandler(xcm.into()), SetAppendix(xcm) => SetAppendix(xcm.into()), ClearError => ClearError, - SetAssetClaimer { location } => SetAssetClaimer { location }, + SetHints { hints } => SetHints { hints }, ClaimAsset { assets, ticket } => ClaimAsset { assets, ticket }, Trap(code) => Trap(code), SubscribeVersion { query_id, max_response_weight } => @@ -1227,7 +1248,8 @@ impl> GetWeight for Instruction { TransferAsset { assets, beneficiary } => W::transfer_asset(assets, beneficiary), TransferReserveAsset { assets, dest, xcm } => W::transfer_reserve_asset(&assets, dest, xcm), - Transact { origin_kind, call } => W::transact(origin_kind, call), + Transact { origin_kind, fallback_max_weight, call } => + W::transact(origin_kind, fallback_max_weight, call), HrmpNewChannelOpenRequest { sender, max_message_size, max_capacity } => W::hrmp_new_channel_open_request(sender, max_message_size, max_capacity), HrmpChannelAccepted { recipient } => W::hrmp_channel_accepted(recipient), @@ -1249,7 +1271,7 @@ impl> GetWeight for Instruction { SetErrorHandler(xcm) => W::set_error_handler(xcm), SetAppendix(xcm) => W::set_appendix(xcm), ClearError => W::clear_error(), - SetAssetClaimer { location } => W::set_asset_claimer(location), + SetHints { hints } => W::set_hints(hints), ClaimAsset { assets, ticket } => W::claim_asset(assets, ticket), Trap(code) => W::trap(code), SubscribeVersion { query_id, max_response_weight } => @@ -1343,8 +1365,11 @@ impl TryFrom> for Instruction { HrmpChannelAccepted { recipient } => Self::HrmpChannelAccepted { recipient }, HrmpChannelClosing { initiator, sender, recipient } => Self::HrmpChannelClosing { initiator, sender, recipient }, - Transact { origin_kind, require_weight_at_most: _, call } => - Self::Transact { origin_kind, call: call.into() }, + Transact { origin_kind, require_weight_at_most, call } => Self::Transact { + origin_kind, + call: call.into(), + fallback_max_weight: Some(require_weight_at_most), + }, ReportError(response_info) => Self::ReportError(QueryResponseInfo { query_id: response_info.query_id, destination: response_info.destination.try_into().map_err(|_| ())?, @@ -1577,6 +1602,59 @@ mod tests { assert_eq!(new_xcm, xcm); } + #[test] + fn transact_roundtrip_works() { + // We can convert as long as there's a fallback. 
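For reference while reading the roundtrip test below, the role of the new field in one place (types from `xcm::latest::prelude`; hand-assembled from the `Transact` conversion hunks in `v4/mod.rs` and `v5/mod.rs` above):

```rust
use xcm::latest::prelude::*;

fn transact_with_fallback() -> Instruction<()> {
    Instruction::Transact {
        origin_kind: OriginKind::SovereignAccount,
        // Ignored by v5 execution itself; only consulted when this message is
        // downgraded to v4 and the call bytes cannot be decoded, in which case
        // it becomes `require_weight_at_most` (or `Weight::MAX` if `None`).
        fallback_max_weight: Some(Weight::from_parts(1_000_000, 1_024)),
        call: vec![200, 200, 200].into(),
    }
}
```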
+ let xcm = Xcm::<()>(vec![ + WithdrawAsset((Here, 1u128).into()), + Transact { + origin_kind: OriginKind::SovereignAccount, + call: vec![200, 200, 200].into(), + fallback_max_weight: Some(Weight::from_parts(1_000_000, 1_024)), + }, + ]); + let old_xcm = OldXcm::<()>(vec![ + OldInstruction::WithdrawAsset((OldHere, 1u128).into()), + OldInstruction::Transact { + origin_kind: OriginKind::SovereignAccount, + call: vec![200, 200, 200].into(), + require_weight_at_most: Weight::from_parts(1_000_000, 1_024), + }, + ]); + assert_eq!(old_xcm, OldXcm::<()>::try_from(xcm.clone()).unwrap()); + let new_xcm: Xcm<()> = old_xcm.try_into().unwrap(); + assert_eq!(new_xcm, xcm); + + // If we have no fallback the resulting message won't know the weight. + let xcm_without_fallback = Xcm::<()>(vec![ + WithdrawAsset((Here, 1u128).into()), + Transact { + origin_kind: OriginKind::SovereignAccount, + call: vec![200, 200, 200].into(), + fallback_max_weight: None, + }, + ]); + let old_xcm = OldXcm::<()>(vec![ + OldInstruction::WithdrawAsset((OldHere, 1u128).into()), + OldInstruction::Transact { + origin_kind: OriginKind::SovereignAccount, + call: vec![200, 200, 200].into(), + require_weight_at_most: Weight::MAX, + }, + ]); + assert_eq!(old_xcm, OldXcm::<()>::try_from(xcm_without_fallback.clone()).unwrap()); + let new_xcm: Xcm<()> = old_xcm.try_into().unwrap(); + let xcm_with_max_weight_fallback = Xcm::<()>(vec![ + WithdrawAsset((Here, 1u128).into()), + Transact { + origin_kind: OriginKind::SovereignAccount, + call: vec![200, 200, 200].into(), + fallback_max_weight: Some(Weight::MAX), + }, + ]); + assert_eq!(new_xcm, xcm_with_max_weight_fallback); + } + #[test] fn decoding_respects_limit() { let max_xcm = Xcm::<()>(vec![ClearOrigin; MAX_INSTRUCTIONS_TO_DECODE as usize]); diff --git a/polkadot/xcm/src/v5/traits.rs b/polkadot/xcm/src/v5/traits.rs index 1f5041ca8d84..79d328561428 100644 --- a/polkadot/xcm/src/v5/traits.rs +++ b/polkadot/xcm/src/v5/traits.rs @@ -428,6 +428,7 @@ pub type SendResult = result::Result<(T, Assets), SendError>; /// let message = Xcm(vec![Instruction::Transact { /// origin_kind: OriginKind::Superuser, /// call: call.into(), +/// fallback_max_weight: None, /// }]); /// let message_hash = message.using_encoded(sp_io::hashing::blake2_256); /// @@ -459,6 +460,10 @@ pub trait SendXcm { /// Actually carry out the delivery operation for a previously validated message sending. fn deliver(ticket: Self::Ticket) -> result::Result; + + /// Ensure `[Self::delivery]` is successful for the given `location` when called in benchmarks. + #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful_delivery(_location: Option) {} } #[impl_trait_for_tuples::impl_for_tuples(30)] @@ -499,16 +504,23 @@ impl SendXcm for Tuple { )* ); Err(SendError::Unroutable) } + + #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful_delivery(location: Option) { + for_tuples!( #( + return Tuple::ensure_successful_delivery(location.clone()); + )* ); + } } /// Convenience function for using a `SendXcm` implementation. Just interprets the `dest` and wraps -/// both in `Some` before passing them as as mutable references into `T::send_xcm`. +/// both in `Some` before passing them as mutable references into `T::send_xcm`. pub fn validate_send(dest: Location, msg: Xcm<()>) -> SendResult { T::validate(&mut Some(dest), &mut Some(msg)) } /// Convenience function for using a `SendXcm` implementation. Just interprets the `dest` and wraps -/// both in `Some` before passing them as as mutable references into `T::send_xcm`. 
+/// both in `Some` before passing them as mutable references into `T::send_xcm`. /// /// Returns either `Ok` with the price of the delivery, or `Err` with the reason why the message /// could not be sent. diff --git a/polkadot/xcm/xcm-builder/Cargo.toml b/polkadot/xcm/xcm-builder/Cargo.toml index eaa115740f3e..e64ab1928132 100644 --- a/polkadot/xcm/xcm-builder/Cargo.toml +++ b/polkadot/xcm/xcm-builder/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true version = "7.0.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -57,6 +59,7 @@ runtime-benchmarks = [ "polkadot-test-runtime/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm/runtime-benchmarks", ] std = [ "codec/std", diff --git a/polkadot/xcm/xcm-builder/src/barriers.rs b/polkadot/xcm/xcm-builder/src/barriers.rs index 56a8493ef0ab..adba9a3ef79f 100644 --- a/polkadot/xcm/xcm-builder/src/barriers.rs +++ b/polkadot/xcm/xcm-builder/src/barriers.rs @@ -95,7 +95,8 @@ impl> ShouldExecute for AllowTopLevelPaidExecutionFrom })? .skip_inst_while(|inst| { matches!(inst, ClearOrigin | AliasOrigin(..)) || - matches!(inst, DescendOrigin(child) if child != &Here) + matches!(inst, DescendOrigin(child) if child != &Here) || + matches!(inst, SetHints { .. }) })? .match_next_inst(|inst| match inst { BuyExecution { weight_limit: Limited(ref mut weight), .. } diff --git a/polkadot/xcm/xcm-builder/src/pay.rs b/polkadot/xcm/xcm-builder/src/pay.rs index 978c6870cdaf..0093051290b7 100644 --- a/polkadot/xcm/xcm-builder/src/pay.rs +++ b/polkadot/xcm/xcm-builder/src/pay.rs @@ -70,8 +70,8 @@ impl< Router: SendXcm, Querier: QueryHandler, Timeout: Get, - Beneficiary: Clone, - AssetKind, + Beneficiary: Clone + core::fmt::Debug, + AssetKind: core::fmt::Debug, AssetKindToLocatableAsset: TryConvert, BeneficiaryRefToLocation: for<'a> TryConvert<&'a Beneficiary, Location>, > Pay @@ -144,10 +144,9 @@ impl< } #[cfg(feature = "runtime-benchmarks")] - fn ensure_successful(_: &Self::Beneficiary, _: Self::AssetKind, _: Self::Balance) { - // We cannot generally guarantee this will go through successfully since we don't have any - // control over the XCM transport layers. We just assume that the benchmark environment - // will be sending it somewhere sensible. 
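The new `ensure_successful_delivery` hook on `SendXcm` has a default no-op body, so each wrapper router in this crate must forward it explicitly or the call stops at the wrapper. A minimal pass-through impl showing where the hook sits (the `Forwarding` wrapper name is illustrative, not from this PR):

```rust
use core::marker::PhantomData;
use xcm::latest::prelude::*;

pub struct Forwarding<Inner>(PhantomData<Inner>);

impl<Inner: SendXcm> SendXcm for Forwarding<Inner> {
    type Ticket = Inner::Ticket;

    fn validate(
        dest: &mut Option<Location>,
        msg: &mut Option<Xcm<()>>,
    ) -> SendResult<Self::Ticket> {
        Inner::validate(dest, msg)
    }

    fn deliver(ticket: Self::Ticket) -> Result<XcmHash, SendError> {
        Inner::deliver(ticket)
    }

    // Benchmark-only: without this forwarding, the trait's default no-op
    // would swallow the call and the wrapped router would never be prepared.
    #[cfg(feature = "runtime-benchmarks")]
    fn ensure_successful_delivery(location: Option<Location>) {
        Inner::ensure_successful_delivery(location);
    }
}
```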
+ fn ensure_successful(_: &Self::Beneficiary, asset_kind: Self::AssetKind, _: Self::Balance) { + let locatable = AssetKindToLocatableAsset::try_convert(asset_kind).unwrap(); + Router::ensure_successful_delivery(Some(locatable.location)); } #[cfg(feature = "runtime-benchmarks")] diff --git a/polkadot/xcm/xcm-builder/src/process_xcm_message.rs b/polkadot/xcm/xcm-builder/src/process_xcm_message.rs index 8dafbf66adf0..67c05c116e9d 100644 --- a/polkadot/xcm/xcm-builder/src/process_xcm_message.rs +++ b/polkadot/xcm/xcm-builder/src/process_xcm_message.rs @@ -58,7 +58,7 @@ impl< let message = Xcm::::try_from(versioned_message).map_err(|_| { log::trace!( target: LOG_TARGET, - "Failed to convert `VersionedXcm` into `XcmV3`.", + "Failed to convert `VersionedXcm` into `xcm::prelude::Xcm`!", ); ProcessMessageError::Unsupported diff --git a/polkadot/xcm/xcm-builder/src/routing.rs b/polkadot/xcm/xcm-builder/src/routing.rs index fc2de89d2128..5b0d0a5f9835 100644 --- a/polkadot/xcm/xcm-builder/src/routing.rs +++ b/polkadot/xcm/xcm-builder/src/routing.rs @@ -60,6 +60,11 @@ impl SendXcm for WithUniqueTopic { Inner::deliver(ticket)?; Ok(unique_id) } + + #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful_delivery(location: Option) { + Inner::ensure_successful_delivery(location); + } } impl InspectMessageQueues for WithUniqueTopic { fn clear_messages() { @@ -114,6 +119,11 @@ impl SendXcm for WithTopicSource) { + Inner::ensure_successful_delivery(location); + } } /// Trait for a type which ensures all requirements for successful delivery with XCM transport @@ -211,4 +221,9 @@ impl SendXcm for EnsureDecodableXcm { fn deliver(ticket: Self::Ticket) -> Result { Inner::deliver(ticket) } + + #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful_delivery(location: Option) { + Inner::ensure_successful_delivery(location); + } } diff --git a/polkadot/xcm/xcm-builder/src/tests/barriers.rs b/polkadot/xcm/xcm-builder/src/tests/barriers.rs index cd2b6db66efc..d8805274d3a5 100644 --- a/polkadot/xcm/xcm-builder/src/tests/barriers.rs +++ b/polkadot/xcm/xcm-builder/src/tests/barriers.rs @@ -333,6 +333,26 @@ fn allow_paid_should_deprivilege_origin() { assert_eq!(r, Err(ProcessMessageError::Overweight(Weight::from_parts(30, 30)))); } +#[test] +fn allow_paid_should_allow_hints() { + AllowPaidFrom::set(vec![Parent.into()]); + let fees = (Parent, 1).into(); + + let mut paying_message_with_hints = Xcm::<()>(vec![ + ReserveAssetDeposited((Parent, 100).into()), + SetHints { hints: vec![AssetClaimer { location: Location::here() }].try_into().unwrap() }, + BuyExecution { fees, weight_limit: Limited(Weight::from_parts(30, 30)) }, + DepositAsset { assets: AllCounted(1).into(), beneficiary: Here.into() }, + ]); + let r = AllowTopLevelPaidExecutionFrom::>::should_execute( + &Parent.into(), + paying_message_with_hints.inner_mut(), + Weight::from_parts(30, 30), + &mut props(Weight::zero()), + ); + assert_eq!(r, Ok(())); +} + #[test] fn suspension_should_work() { TestSuspender::set_suspended(true); diff --git a/polkadot/xcm/xcm-builder/src/tests/pay/pay.rs b/polkadot/xcm/xcm-builder/src/tests/pay/pay.rs index 062faee2abd9..b4718edc6c98 100644 --- a/polkadot/xcm/xcm-builder/src/tests/pay/pay.rs +++ b/polkadot/xcm/xcm-builder/src/tests/pay/pay.rs @@ -22,7 +22,7 @@ use frame_support::{assert_ok, traits::tokens::Pay}; /// Type representing both a location and an asset that is held at that location. /// The id of the held asset is relative to the location where it is being held. 
-#[derive(Encode, Decode, Clone, PartialEq, Eq)] +#[derive(Encode, Decode, Clone, PartialEq, Eq, Debug)] pub struct AssetKind { destination: Location, asset_id: AssetId, diff --git a/polkadot/xcm/xcm-builder/src/tests/transacting.rs b/polkadot/xcm/xcm-builder/src/tests/transacting.rs index 8963e7147fdc..ba932beaeb3d 100644 --- a/polkadot/xcm/xcm-builder/src/tests/transacting.rs +++ b/polkadot/xcm/xcm-builder/src/tests/transacting.rs @@ -23,6 +23,7 @@ fn transacting_should_work() { let message = Xcm::(vec![Transact { origin_kind: OriginKind::Native, call: TestCall::Any(Weight::from_parts(50, 50), None).encode().into(), + fallback_max_weight: None, }]); let mut hash = fake_message_hash(&message); let weight_limit = Weight::from_parts(60, 60); @@ -43,6 +44,7 @@ fn transacting_should_respect_max_weight_requirement() { let message = Xcm::(vec![Transact { origin_kind: OriginKind::Native, call: TestCall::Any(Weight::from_parts(50, 50), None).encode().into(), + fallback_max_weight: None, }]); let mut hash = fake_message_hash(&message); let weight_limit = Weight::from_parts(60, 60); @@ -65,6 +67,7 @@ fn transacting_should_refund_weight() { call: TestCall::Any(Weight::from_parts(50, 50), Some(Weight::from_parts(30, 30))) .encode() .into(), + fallback_max_weight: None, }]); let mut hash = fake_message_hash(&message); let weight_limit = Weight::from_parts(60, 60); @@ -96,6 +99,7 @@ fn paid_transacting_should_refund_payment_for_unused_weight() { call: TestCall::Any(Weight::from_parts(50, 50), Some(Weight::from_parts(10, 10))) .encode() .into(), + fallback_max_weight: None, }, RefundSurplus, DepositAsset { assets: AllCounted(1).into(), beneficiary: one }, @@ -124,6 +128,7 @@ fn report_successful_transact_status_should_work() { Transact { origin_kind: OriginKind::Native, call: TestCall::Any(Weight::from_parts(50, 50), None).encode().into(), + fallback_max_weight: None, }, ReportTransactStatus(QueryResponseInfo { destination: Parent.into(), @@ -159,6 +164,7 @@ fn report_failed_transact_status_should_work() { Transact { origin_kind: OriginKind::Native, call: TestCall::OnlyRoot(Weight::from_parts(50, 50), None).encode().into(), + fallback_max_weight: None, }, ReportTransactStatus(QueryResponseInfo { destination: Parent.into(), @@ -194,6 +200,7 @@ fn expect_successful_transact_status_should_work() { Transact { origin_kind: OriginKind::Native, call: TestCall::Any(Weight::from_parts(50, 50), None).encode().into(), + fallback_max_weight: None, }, ExpectTransactStatus(MaybeErrorCode::Success), ]); @@ -212,6 +219,7 @@ fn expect_successful_transact_status_should_work() { Transact { origin_kind: OriginKind::Native, call: TestCall::OnlyRoot(Weight::from_parts(50, 50), None).encode().into(), + fallback_max_weight: None, }, ExpectTransactStatus(MaybeErrorCode::Success), ]); @@ -238,6 +246,7 @@ fn expect_failed_transact_status_should_work() { Transact { origin_kind: OriginKind::Native, call: TestCall::OnlyRoot(Weight::from_parts(50, 50), None).encode().into(), + fallback_max_weight: None, }, ExpectTransactStatus(vec![2].into()), ]); @@ -256,6 +265,7 @@ fn expect_failed_transact_status_should_work() { Transact { origin_kind: OriginKind::Native, call: TestCall::Any(Weight::from_parts(50, 50), None).encode().into(), + fallback_max_weight: None, }, ExpectTransactStatus(vec![2].into()), ]); @@ -282,6 +292,7 @@ fn clear_transact_status_should_work() { Transact { origin_kind: OriginKind::Native, call: TestCall::OnlyRoot(Weight::from_parts(50, 50), None).encode().into(), + fallback_max_weight: None, }, ClearTransactStatus, 
ReportTransactStatus(QueryResponseInfo { diff --git a/polkadot/xcm/xcm-builder/src/universal_exports.rs b/polkadot/xcm/xcm-builder/src/universal_exports.rs index 5c754f01ec0a..6b3c3adf737d 100644 --- a/polkadot/xcm/xcm-builder/src/universal_exports.rs +++ b/polkadot/xcm/xcm-builder/src/universal_exports.rs @@ -68,25 +68,36 @@ impl> SendXcm fn validate( dest: &mut Option, - xcm: &mut Option>, + msg: &mut Option>, ) -> SendResult { - let d = dest.take().ok_or(MissingArgument)?; + // This `clone` ensures that `dest` is not consumed in any case. + let d = dest.clone().take().ok_or(MissingArgument)?; let universal_source = UniversalLocation::get(); - let devolved = match ensure_is_remote(universal_source.clone(), d) { - Ok(x) => x, - Err(d) => { - *dest = Some(d); - return Err(NotApplicable) - }, - }; - let (network, destination) = devolved; - let xcm = xcm.take().ok_or(SendError::MissingArgument)?; - validate_export::(network, 0, universal_source, destination, xcm) + let devolved = ensure_is_remote(universal_source.clone(), d).map_err(|_| NotApplicable)?; + let (remote_network, remote_location) = devolved; + let xcm = msg.take().ok_or(MissingArgument)?; + + validate_export::( + remote_network, + 0, + universal_source, + remote_location, + xcm.clone(), + ) + .inspect_err(|err| { + if let NotApplicable = err { + // We need to make sure that msg is not consumed in case of `NotApplicable`. + *msg = Some(xcm); + } + }) } fn deliver(ticket: Exporter::Ticket) -> Result { Exporter::deliver(ticket) } + + #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful_delivery(_: Option) {} } pub trait ExporterFor { @@ -95,7 +106,7 @@ pub trait ExporterFor { /// /// The payment is specified from the local context, not the bridge chain. This is the /// total amount to withdraw in to Holding and should cover both payment for the execution on - /// the bridge chain as well as payment for the use of the `ExportMessage` instruction. + /// the bridge chain and payment for the use of the `ExportMessage` instruction. fn exporter_for( network: &NetworkId, remote_location: &InteriorLocation, @@ -205,7 +216,8 @@ impl, msg: &mut Option>, ) -> SendResult { - let d = dest.clone().ok_or(MissingArgument)?; + // This `clone` ensures that `dest` is not consumed in any case. + let d = dest.clone().take().ok_or(MissingArgument)?; let devolved = ensure_is_remote(UniversalLocation::get(), d).map_err(|_| NotApplicable)?; let (remote_network, remote_location) = devolved; let xcm = msg.take().ok_or(MissingArgument)?; @@ -216,7 +228,7 @@ impl(bridge, message) + validate_send::(bridge, message).inspect_err(|err| { + if let NotApplicable = err { + // We need to make sure that msg is not consumed in case of `NotApplicable`. + *msg = Some(xcm); + } + }) } fn deliver(validation: Self::Ticket) -> Result { Router::deliver(validation) } + + #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful_delivery(location: Option) { + Router::ensure_successful_delivery(location); + } } /// Implementation of `SendXcm` which wraps the message inside an `ExportMessage` instruction @@ -272,9 +298,9 @@ impl, msg: &mut Option>, ) -> SendResult { - let d = dest.as_ref().ok_or(MissingArgument)?; - let devolved = - ensure_is_remote(UniversalLocation::get(), d.clone()).map_err(|_| NotApplicable)?; + // This `clone` ensures that `dest` is not consumed in any case. 
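+        // If this sender answers `NotApplicable`, the untouched `dest` lets a surrounding
+        // tuple router move on and try the next sender in the list.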
+ let d = dest.clone().take().ok_or(MissingArgument)?; + let devolved = ensure_is_remote(UniversalLocation::get(), d).map_err(|_| NotApplicable)?; let (remote_network, remote_location) = devolved; let xcm = msg.take().ok_or(MissingArgument)?; @@ -284,7 +310,7 @@ impl(bridge, message)?; + let (v, mut cost) = validate_send::(bridge, message).inspect_err(|err| { + if let NotApplicable = err { + // We need to make sure that msg is not consumed in case of `NotApplicable`. + *msg = Some(xcm); + } + })?; if let Some(bridge_payment) = maybe_payment { cost.push(bridge_payment); } @@ -335,6 +369,11 @@ impl Result { Router::deliver(ticket) } + + #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful_delivery(location: Option) { + Router::ensure_successful_delivery(location); + } } impl InspectMessageQueues @@ -476,10 +515,10 @@ impl< let Location { parents, interior: mut junctions } = BridgedNetwork::get(); match junctions.take_first() { Some(GlobalConsensus(network)) => (network, parents), - _ => return Err(SendError::NotApplicable), + _ => return Err(NotApplicable), } }; - ensure!(&network == &bridged_network, SendError::NotApplicable); + ensure!(&network == &bridged_network, NotApplicable); // We don't/can't use the `channel` for this adapter. let dest = destination.take().ok_or(SendError::MissingArgument)?; @@ -496,7 +535,7 @@ impl< }, Err((dest, _)) => { *destination = Some(dest); - return Err(SendError::NotApplicable) + return Err(NotApplicable) }, }; @@ -540,6 +579,10 @@ impl< #[cfg(test)] mod tests { use super::*; + use frame_support::{ + assert_err, assert_ok, + traits::{Contains, Equals}, + }; #[test] fn ensure_is_remote_works() { @@ -564,20 +607,50 @@ mod tests { assert_eq!(x, Err((Parent, Polkadot, Parachain(1000)).into())); } - pub struct OkSender; - impl SendXcm for OkSender { + pub struct OkFor(PhantomData); + impl> SendXcm for OkFor { type Ticket = (); fn validate( - _destination: &mut Option, + destination: &mut Option, _message: &mut Option>, ) -> SendResult { - Ok(((), Assets::new())) + if let Some(d) = destination.as_ref() { + if Filter::contains(&d) { + return Ok(((), Assets::new())) + } + } + Err(NotApplicable) } fn deliver(_ticket: Self::Ticket) -> Result { Ok([0; 32]) } + + #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful_delivery(_: Option) {} + } + impl> ExportXcm for OkFor { + type Ticket = (); + + fn validate( + network: NetworkId, + _: u32, + _: &mut Option, + destination: &mut Option, + _: &mut Option>, + ) -> SendResult { + if let Some(d) = destination.as_ref() { + if Filter::contains(&(network, d.clone())) { + return Ok(((), Assets::new())) + } + } + Err(NotApplicable) + } + + fn deliver(_ticket: Self::Ticket) -> Result { + Ok([1; 32]) + } } /// Generic test case asserting that dest and msg is not consumed by `validate` implementation @@ -598,46 +671,168 @@ mod tests { } #[test] - fn remote_exporters_does_not_consume_dest_or_msg_on_not_applicable() { + fn local_exporters_works() { frame_support::parameter_types! 
{ pub Local: NetworkId = ByGenesis([0; 32]); pub UniversalLocation: InteriorLocation = [GlobalConsensus(Local::get()), Parachain(1234)].into(); pub DifferentRemote: NetworkId = ByGenesis([22; 32]); - // no routers - pub BridgeTable: Vec = vec![]; + pub RemoteDestination: Junction = Parachain(9657); + pub RoutableBridgeFilter: (NetworkId, InteriorLocation) = (DifferentRemote::get(), RemoteDestination::get().into()); } + type RoutableBridgeExporter = OkFor>; + type NotApplicableBridgeExporter = OkFor<()>; + assert_ok!(validate_export::( + DifferentRemote::get(), + 0, + UniversalLocation::get(), + RemoteDestination::get().into(), + Xcm::default() + )); + assert_err!( + validate_export::( + DifferentRemote::get(), + 0, + UniversalLocation::get(), + RemoteDestination::get().into(), + Xcm::default() + ), + NotApplicable + ); - // check with local destination (should be remote) + // 1. check with local destination (should be remote) let local_dest: Location = (Parent, Parachain(5678)).into(); assert!(ensure_is_remote(UniversalLocation::get(), local_dest.clone()).is_err()); + // UnpaidLocalExporter ensure_validate_does_not_consume_dest_or_msg::< - UnpaidRemoteExporter, OkSender, UniversalLocation>, + UnpaidLocalExporter, >(local_dest.clone(), |result| assert_eq!(Err(NotApplicable), result)); + // 2. check with not applicable from the inner router (using `NotApplicableBridgeSender`) + let remote_dest: Location = + (Parent, Parent, DifferentRemote::get(), RemoteDestination::get()).into(); + assert!(ensure_is_remote(UniversalLocation::get(), remote_dest.clone()).is_ok()); + + // UnpaidLocalExporter + ensure_validate_does_not_consume_dest_or_msg::< + UnpaidLocalExporter, + >(remote_dest.clone(), |result| assert_eq!(Err(NotApplicable), result)); + + // 3. Ok - deliver + // UnpaidRemoteExporter + assert_ok!(send_xcm::>( + remote_dest, + Xcm::default() + )); + } + + #[test] + fn remote_exporters_works() { + frame_support::parameter_types! { + pub Local: NetworkId = ByGenesis([0; 32]); + pub UniversalLocation: InteriorLocation = [GlobalConsensus(Local::get()), Parachain(1234)].into(); + pub DifferentRemote: NetworkId = ByGenesis([22; 32]); + pub RoutableBridge: Location = Location::new(1, Parachain(9657)); + // not routable + pub NotApplicableBridgeTable: Vec = vec![]; + // routable + pub RoutableBridgeTable: Vec = vec![ + NetworkExportTableItem::new( + DifferentRemote::get(), + None, + RoutableBridge::get(), + None + ) + ]; + } + type RoutableBridgeSender = OkFor>; + type NotApplicableBridgeSender = OkFor<()>; + assert_ok!(validate_send::(RoutableBridge::get(), Xcm::default())); + assert_err!( + validate_send::(RoutableBridge::get(), Xcm::default()), + NotApplicable + ); + + // 1. check with local destination (should be remote) + let local_dest: Location = (Parent, Parachain(5678)).into(); + assert!(ensure_is_remote(UniversalLocation::get(), local_dest.clone()).is_err()); + + // UnpaidRemoteExporter + ensure_validate_does_not_consume_dest_or_msg::< + UnpaidRemoteExporter< + NetworkExportTable, + RoutableBridgeSender, + UniversalLocation, + >, + >(local_dest.clone(), |result| assert_eq!(Err(NotApplicable), result)); + // SovereignPaidRemoteExporter ensure_validate_does_not_consume_dest_or_msg::< SovereignPaidRemoteExporter< - NetworkExportTable, - OkSender, + NetworkExportTable, + RoutableBridgeSender, UniversalLocation, >, >(local_dest, |result| assert_eq!(Err(NotApplicable), result)); - // check with not applicable destination + // 2. 
check with not applicable destination (`NotApplicableBridgeTable`) let remote_dest: Location = (Parent, Parent, DifferentRemote::get()).into(); assert!(ensure_is_remote(UniversalLocation::get(), remote_dest.clone()).is_ok()); + // UnpaidRemoteExporter ensure_validate_does_not_consume_dest_or_msg::< - UnpaidRemoteExporter, OkSender, UniversalLocation>, + UnpaidRemoteExporter< + NetworkExportTable, + RoutableBridgeSender, + UniversalLocation, + >, >(remote_dest.clone(), |result| assert_eq!(Err(NotApplicable), result)); - + // SovereignPaidRemoteExporter ensure_validate_does_not_consume_dest_or_msg::< SovereignPaidRemoteExporter< - NetworkExportTable, - OkSender, + NetworkExportTable, + RoutableBridgeSender, UniversalLocation, >, >(remote_dest, |result| assert_eq!(Err(NotApplicable), result)); + + // 3. check with not applicable from the inner router (using `NotApplicableBridgeSender`) + let remote_dest: Location = (Parent, Parent, DifferentRemote::get()).into(); + assert!(ensure_is_remote(UniversalLocation::get(), remote_dest.clone()).is_ok()); + + // UnpaidRemoteExporter + ensure_validate_does_not_consume_dest_or_msg::< + UnpaidRemoteExporter< + NetworkExportTable, + NotApplicableBridgeSender, + UniversalLocation, + >, + >(remote_dest.clone(), |result| assert_eq!(Err(NotApplicable), result)); + // SovereignPaidRemoteExporter + ensure_validate_does_not_consume_dest_or_msg::< + SovereignPaidRemoteExporter< + NetworkExportTable, + NotApplicableBridgeSender, + UniversalLocation, + >, + >(remote_dest.clone(), |result| assert_eq!(Err(NotApplicable), result)); + + // 4. Ok - deliver + // UnpaidRemoteExporter + assert_ok!(send_xcm::< + UnpaidRemoteExporter< + NetworkExportTable, + RoutableBridgeSender, + UniversalLocation, + >, + >(remote_dest.clone(), Xcm::default())); + // SovereignPaidRemoteExporter + assert_ok!(send_xcm::< + SovereignPaidRemoteExporter< + NetworkExportTable, + RoutableBridgeSender, + UniversalLocation, + >, + >(remote_dest, Xcm::default())); } #[test] diff --git a/polkadot/xcm/xcm-executor/Cargo.toml b/polkadot/xcm/xcm-executor/Cargo.toml index cc966f91fe4d..eb558c0fcc19 100644 --- a/polkadot/xcm/xcm-executor/Cargo.toml +++ b/polkadot/xcm/xcm-executor/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true version = "7.0.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -30,6 +32,7 @@ runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "sp-runtime/runtime-benchmarks", + "xcm/runtime-benchmarks", ] std = [ "codec/std", diff --git a/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml b/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml index 7e6bfe967b90..a89dd74a44fa 100644 --- a/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml +++ b/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml @@ -13,9 +13,12 @@ workspace = true [dependencies] codec = { workspace = true, default-features = true } frame-support = { workspace = true } +frame-system = { workspace = true, default-features = true } futures = { workspace = true } pallet-transaction-payment = { workspace = true, default-features = true } +pallet-sudo = { workspace = true, default-features = true } pallet-xcm = { workspace = true, default-features = true } +polkadot-runtime-parachains = { workspace = true, default-features = true } polkadot-test-client = { workspace = true } polkadot-test-runtime = { workspace = true } polkadot-test-service = { workspace = true } @@ -30,4 +33,4 @@ 
sp-core = { workspace = true, default-features = true } [features] default = ["std"] -std = ["frame-support/std", "sp-runtime/std", "xcm/std"] +std = ["frame-support/std", "frame-system/std", "pallet-sudo/std", "polkadot-runtime-parachains/std", "sp-runtime/std", "xcm/std"] diff --git a/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs b/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs index 9b918fd7eeed..dfcc3fc4187f 100644 --- a/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs +++ b/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs @@ -79,7 +79,11 @@ fn transact_recursion_limit_works() { Xcm(vec![ WithdrawAsset((Here, 1_000).into()), BuyExecution { fees: (Here, 1).into(), weight_limit: Unlimited }, - Transact { origin_kind: OriginKind::Native, call: call.encode().into() }, + Transact { + origin_kind: OriginKind::Native, + call: call.encode().into(), + fallback_max_weight: None, + }, ]) }; let mut call: Option = None; @@ -371,6 +375,26 @@ fn deposit_reserve_asset_works_for_any_xcm_sender() { let mut block_builder = client.init_polkadot_block_builder(); + // Make the para available, so that `DMP` doesn't reject the XCM because the para is unknown. + let make_para_available = + construct_extrinsic( + &client, + polkadot_test_runtime::RuntimeCall::Sudo(pallet_sudo::Call::sudo { + call: Box::new(polkadot_test_runtime::RuntimeCall::System( + frame_system::Call::set_storage { + items: vec![( + polkadot_runtime_parachains::paras::Heads::< + polkadot_test_runtime::Runtime, + >::hashed_key_for(2000u32), + vec![1, 2, 3], + )], + }, + )), + }), + sp_keyring::Sr25519Keyring::Alice, + 0, + ); + // Simulate execution of an incoming XCM message at the reserve chain let execute = construct_extrinsic( &client, @@ -379,9 +403,12 @@ fn deposit_reserve_asset_works_for_any_xcm_sender() { max_weight: Weight::from_parts(1_000_000_000, 1024 * 1024), }), sp_keyring::Sr25519Keyring::Alice, - 0, + 1, ); + block_builder + .push_polkadot_extrinsic(make_para_available) + .expect("pushes extrinsic"); block_builder.push_polkadot_extrinsic(execute).expect("pushes extrinsic"); let block = block_builder.build().expect("Finalizes the block").block; diff --git a/polkadot/xcm/xcm-executor/src/lib.rs b/polkadot/xcm/xcm-executor/src/lib.rs index 4e051f24050c..d0f18aea1ab3 100644 --- a/polkadot/xcm/xcm-executor/src/lib.rs +++ b/polkadot/xcm/xcm-executor/src/lib.rs @@ -939,7 +939,8 @@ impl XcmExecutor { Ok(()) }) }, - Transact { origin_kind, mut call } => { + // `fallback_max_weight` is not used in the executor, it's only for conversions. + Transact { origin_kind, mut call, .. } => { // We assume that the Relay-chain is allowed to use transact on this parachain. let origin = self.cloned_origin().ok_or_else(|| { tracing::trace!( @@ -1086,18 +1087,19 @@ impl XcmExecutor { DepositReserveAsset { assets, dest, xcm } => { let old_holding = self.holding.clone(); let result = Config::TransactionalProcessor::process(|| { - let maybe_delivery_fee_from_holding = if self.fees.is_empty() { - self.get_delivery_fee_from_holding(&assets, &dest, &xcm)? + let mut assets = self.holding.saturating_take(assets); + // When not using `PayFees`, nor `JIT_WITHDRAW`, delivery fees are paid from + // transferred assets. + let maybe_delivery_fee_from_assets = if self.fees.is_empty() && !self.fees_mode.jit_withdraw { + // Deduct and return the part of `assets` that shall be used for delivery fees. + self.take_delivery_fee_from_assets(&mut assets, &dest, FeeReason::DepositReserveAsset, &xcm)? 
} else { None }; - let mut message = Vec::with_capacity(xcm.len() + 2); - // now take assets to deposit (after having taken delivery fees) - let deposited = self.holding.saturating_take(assets); - tracing::trace!(target: "xcm::DepositReserveAsset", ?deposited, "Assets except delivery fee"); + tracing::trace!(target: "xcm::DepositReserveAsset", ?assets, "Assets except delivery fee"); Self::do_reserve_deposit_assets( - deposited, + assets, &dest, &mut message, Some(&self.context), @@ -1106,7 +1108,7 @@ impl XcmExecutor { message.push(ClearOrigin); // append custom instructions message.extend(xcm.0.into_iter()); - if let Some(delivery_fee) = maybe_delivery_fee_from_holding { + if let Some(delivery_fee) = maybe_delivery_fee_from_assets { // Put back delivery_fee in holding register to be charged by XcmSender. self.holding.subsume_assets(delivery_fee); } @@ -1121,7 +1123,15 @@ impl XcmExecutor { InitiateReserveWithdraw { assets, reserve, xcm } => { let old_holding = self.holding.clone(); let result = Config::TransactionalProcessor::process(|| { - let assets = self.holding.saturating_take(assets); + let mut assets = self.holding.saturating_take(assets); + // When not using `PayFees`, nor `JIT_WITHDRAW`, delivery fees are paid from + // transferred assets. + let maybe_delivery_fee_from_assets = if self.fees.is_empty() && !self.fees_mode.jit_withdraw { + // Deduct and return the part of `assets` that shall be used for delivery fees. + self.take_delivery_fee_from_assets(&mut assets, &reserve, FeeReason::InitiateReserveWithdraw, &xcm)? + } else { + None + }; let mut message = Vec::with_capacity(xcm.len() + 2); Self::do_reserve_withdraw_assets( assets, @@ -1133,6 +1143,10 @@ impl XcmExecutor { message.push(ClearOrigin); // append custom instructions message.extend(xcm.0.into_iter()); + if let Some(delivery_fee) = maybe_delivery_fee_from_assets { + // Put back delivery_fee in holding register to be charged by XcmSender. + self.holding.subsume_assets(delivery_fee); + } self.send(reserve, Xcm(message), FeeReason::InitiateReserveWithdraw)?; Ok(()) }); @@ -1144,13 +1158,25 @@ impl XcmExecutor { InitiateTeleport { assets, dest, xcm } => { let old_holding = self.holding.clone(); let result = Config::TransactionalProcessor::process(|| { - let assets = self.holding.saturating_take(assets); + let mut assets = self.holding.saturating_take(assets); + // When not using `PayFees`, nor `JIT_WITHDRAW`, delivery fees are paid from + // transferred assets. + let maybe_delivery_fee_from_assets = if self.fees.is_empty() && !self.fees_mode.jit_withdraw { + // Deduct and return the part of `assets` that shall be used for delivery fees. + self.take_delivery_fee_from_assets(&mut assets, &dest, FeeReason::InitiateTeleport, &xcm)? + } else { + None + }; let mut message = Vec::with_capacity(xcm.len() + 2); Self::do_teleport_assets(assets, &dest, &mut message, &self.context)?; // clear origin for subsequent custom instructions message.push(ClearOrigin); // append custom instructions message.extend(xcm.0.into_iter()); + if let Some(delivery_fee) = maybe_delivery_fee_from_assets { + // Put back delivery_fee in holding register to be charged by XcmSender. 
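+                        // The fee was already deducted from the transferred `assets` above, so
+                        // putting it back into holding merely stages it for the `send` below.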
+ self.holding.subsume_assets(delivery_fee); + } self.send(dest.clone(), Xcm(message), FeeReason::InitiateTeleport)?; Ok(()) }); @@ -1370,8 +1396,14 @@ impl XcmExecutor { self.error = None; Ok(()) }, - SetAssetClaimer { location } => { - self.asset_claimer = Some(location); + SetHints { hints } => { + for hint in hints.into_iter() { + match hint { + AssetClaimer { location } => { + self.asset_claimer = Some(location) + }, + } + } Ok(()) }, ClaimAsset { assets, ticket } => { @@ -1707,36 +1739,48 @@ impl XcmExecutor { Ok(()) } - /// Gets the necessary delivery fee to send a reserve transfer message to `destination` from - /// holding. + /// Take from transferred `assets` the delivery fee required to send an onward transfer message + /// to `destination`. /// /// Will be removed once the transition from `BuyExecution` to `PayFees` is complete. - fn get_delivery_fee_from_holding( - &mut self, - assets: &AssetFilter, + fn take_delivery_fee_from_assets( + &self, + assets: &mut AssetsInHolding, destination: &Location, + reason: FeeReason, xcm: &Xcm<()>, ) -> Result, XcmError> { - // we need to do this take/put cycle to solve wildcards and get exact assets to - // be weighed - let to_weigh = self.holding.saturating_take(assets.clone()); - self.holding.subsume_assets(to_weigh.clone()); + let to_weigh = assets.clone(); let to_weigh_reanchored = Self::reanchored(to_weigh, &destination, None); - let mut message_to_weigh = vec![ReserveAssetDeposited(to_weigh_reanchored), ClearOrigin]; + let remote_instruction = match reason { + FeeReason::DepositReserveAsset => ReserveAssetDeposited(to_weigh_reanchored), + FeeReason::InitiateReserveWithdraw => WithdrawAsset(to_weigh_reanchored), + FeeReason::InitiateTeleport => ReceiveTeleportedAsset(to_weigh_reanchored), + _ => { + tracing::debug!( + target: "xcm::take_delivery_fee_from_assets", + "Unexpected delivery fee reason", + ); + return Err(XcmError::NotHoldingFees); + }, + }; + let mut message_to_weigh = Vec::with_capacity(xcm.len() + 2); + message_to_weigh.push(remote_instruction); + message_to_weigh.push(ClearOrigin); message_to_weigh.extend(xcm.0.clone().into_iter()); let (_, fee) = validate_send::(destination.clone(), Xcm(message_to_weigh))?; let maybe_delivery_fee = fee.get(0).map(|asset_needed_for_fees| { tracing::trace!( - target: "xcm::fees::DepositReserveAsset", + target: "xcm::fees::take_delivery_fee_from_assets", "Asset provided to pay for fees {:?}, asset required for delivery fees: {:?}", self.asset_used_in_buy_execution, asset_needed_for_fees, ); let asset_to_pay_for_fees = self.calculate_asset_for_delivery_fees(asset_needed_for_fees.clone()); // set aside fee to be charged by XcmSender - let delivery_fee = self.holding.saturating_take(asset_to_pay_for_fees.into()); - tracing::trace!(target: "xcm::fees::DepositReserveAsset", ?delivery_fee); + let delivery_fee = assets.saturating_take(asset_to_pay_for_fees.into()); + tracing::trace!(target: "xcm::fees::take_delivery_fee_from_assets", ?delivery_fee); delivery_fee }); Ok(maybe_delivery_fee) diff --git a/polkadot/xcm/xcm-executor/src/tests/set_asset_claimer.rs b/polkadot/xcm/xcm-executor/src/tests/set_asset_claimer.rs index bc504b8db2a2..cc97e2b3a16e 100644 --- a/polkadot/xcm/xcm-executor/src/tests/set_asset_claimer.rs +++ b/polkadot/xcm/xcm-executor/src/tests/set_asset_claimer.rs @@ -38,7 +38,7 @@ fn set_asset_claimer() { // if withdrawing fails we're not missing any corner case. 
.withdraw_asset((Here, 100u128)) .clear_origin() - .set_asset_claimer(bob.clone()) + .set_hints(vec![AssetClaimer { location: bob.clone() }]) .pay_fees((Here, 10u128)) // 10% destined for fees, not more. .build(); @@ -93,7 +93,7 @@ fn trap_then_set_asset_claimer() { .withdraw_asset((Here, 100u128)) .clear_origin() .trap(0u64) - .set_asset_claimer(bob) + .set_hints(vec![AssetClaimer { location: bob }]) .pay_fees((Here, 10u128)) // 10% destined for fees, not more. .build(); @@ -121,7 +121,7 @@ fn set_asset_claimer_then_trap() { // if withdrawing fails we're not missing any corner case. .withdraw_asset((Here, 100u128)) .clear_origin() - .set_asset_claimer(bob.clone()) + .set_hints(vec![AssetClaimer { location: bob.clone() }]) .trap(0u64) .pay_fees((Here, 10u128)) // 10% destined for fees, not more. .build(); diff --git a/polkadot/xcm/xcm-executor/src/traits/export.rs b/polkadot/xcm/xcm-executor/src/traits/export.rs index b356e0da7df7..3e9275edab37 100644 --- a/polkadot/xcm/xcm-executor/src/traits/export.rs +++ b/polkadot/xcm/xcm-executor/src/traits/export.rs @@ -108,7 +108,7 @@ impl ExportXcm for Tuple { } /// Convenience function for using a `SendXcm` implementation. Just interprets the `dest` and wraps -/// both in `Some` before passing them as as mutable references into `T::send_xcm`. +/// both in `Some` before passing them as mutable references into `T::send_xcm`. pub fn validate_export( network: NetworkId, channel: u32, @@ -120,7 +120,7 @@ pub fn validate_export( } /// Convenience function for using a `SendXcm` implementation. Just interprets the `dest` and wraps -/// both in `Some` before passing them as as mutable references into `T::send_xcm`. +/// both in `Some` before passing them as mutable references into `T::send_xcm`. /// /// Returns either `Ok` with the price of the delivery, or `Err` with the reason why the message /// could not be sent. 
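A recurring pattern in the router changes above: `validate` must leave `dest` and `msg` untouched whenever it answers `NotApplicable`, so that a surrounding tuple of routers can fall through to the next sender. Below is a minimal, self-contained sketch of that contract; the types are simplified stand-ins, not the real `SendXcm`, `Location` or `Xcm` definitions.

```rust
#[derive(Debug, PartialEq)]
enum SendError {
    NotApplicable,
    MissingArgument,
}

// Simplified stand-ins for `Location` and `Xcm<()>`.
type Location = u32;
type Message = Vec<u8>;

// Pretend only destination 42 is routable by this particular sender.
fn validate_inner(dest: &Location, _msg: &Message) -> Result<(), SendError> {
    if *dest == 42 {
        Ok(())
    } else {
        Err(SendError::NotApplicable)
    }
}

fn validate(dest: &mut Option<Location>, msg: &mut Option<Message>) -> Result<(), SendError> {
    // Clone instead of `take()` so `dest` survives a `NotApplicable` outcome.
    let d = dest.clone().ok_or(SendError::MissingArgument)?;
    let m = msg.take().ok_or(SendError::MissingArgument)?;
    validate_inner(&d, &m).inspect_err(|err| {
        // Put the message back so the next router in a tuple can still try it.
        if *err == SendError::NotApplicable {
            *msg = Some(m);
        }
    })
}

fn main() {
    let mut dest = Some(7);
    let mut msg = Some(vec![1, 2, 3]);
    assert_eq!(validate(&mut dest, &mut msg), Err(SendError::NotApplicable));
    // Both inputs are intact, ready for the next sender in the list.
    assert!(dest.is_some() && msg.is_some());
}
```

This mirrors the `inspect_err` handling that `UnpaidLocalExporter`, `UnpaidRemoteExporter` and `SovereignPaidRemoteExporter` gain in this diff; the `OkFor` filter in the new tests plays the role of `validate_inner` here.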
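The executor-side changes apply the same last-resort idea to `DepositReserveAsset`, `InitiateReserveWithdraw` and `InitiateTeleport`: when neither `PayFees` nor `JIT_WITHDRAW` is in play, the delivery fee is carved out of the assets being transferred, then parked back in holding just long enough for the sender to charge it. A rough sketch of that bookkeeping, with a plain integer standing in for `AssetsInHolding` and a fixed delivery price (both simplifying assumptions):

```rust
struct Holding {
    amount: u128,
}

impl Holding {
    /// Take up to `want` out of the holding, like `AssetsInHolding::saturating_take`.
    fn saturating_take(&mut self, want: u128) -> u128 {
        let taken = want.min(self.amount);
        self.amount -= taken;
        taken
    }

    fn subsume(&mut self, more: u128) {
        self.amount += more;
    }
}

fn main() {
    // 100 units were withdrawn for the transfer; delivery costs 10.
    let mut transferred = Holding { amount: 100 };
    let mut holding = Holding { amount: 0 };

    // Mirrors `take_delivery_fee_from_assets`: deduct the fee from the
    // transferred assets, not from whatever else sits in holding.
    let delivery_fee = transferred.saturating_take(10);

    // The onward message is built from the remaining 90 units ...
    assert_eq!(transferred.amount, 90);

    // ... and the fee is put back into holding just before `send`, which
    // is where the XcmSender actually charges it.
    holding.subsume(delivery_fee);
    assert_eq!(holding.amount, 10);
}
```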
diff --git a/polkadot/xcm/xcm-runtime-apis/Cargo.toml b/polkadot/xcm/xcm-runtime-apis/Cargo.toml index 9ccca76c321c..9ada69a1933b 100644 --- a/polkadot/xcm/xcm-runtime-apis/Cargo.toml +++ b/polkadot/xcm/xcm-runtime-apis/Cargo.toml @@ -60,4 +60,5 @@ runtime-benchmarks = [ "pallet-xcm/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm/runtime-benchmarks", ] diff --git a/polkadot/xcm/xcm-runtime-apis/tests/fee_estimation.rs b/polkadot/xcm/xcm-runtime-apis/tests/fee_estimation.rs index 2d14b4e571c6..c3046b134d1f 100644 --- a/polkadot/xcm/xcm-runtime-apis/tests/fee_estimation.rs +++ b/polkadot/xcm/xcm-runtime-apis/tests/fee_estimation.rs @@ -353,3 +353,26 @@ fn dry_run_xcm() { ); }); } + +#[test] +fn calling_payment_api_with_a_lower_version_works() { + let transfer_amount = 100u128; + let xcm_to_weigh = Xcm::::builder_unsafe() + .withdraw_asset((Here, transfer_amount)) + .buy_execution((Here, transfer_amount), Unlimited) + .deposit_asset(AllCounted(1), [1u8; 32]) + .build(); + let versioned_xcm_to_weigh = VersionedXcm::from(xcm_to_weigh.clone().into()); + let lower_version_xcm_to_weigh = versioned_xcm_to_weigh.into_version(XCM_VERSION - 1).unwrap(); + let client = TestClient; + let runtime_api = client.runtime_api(); + let xcm_weight = + runtime_api.query_xcm_weight(H256::zero(), lower_version_xcm_to_weigh).unwrap(); + assert!(xcm_weight.is_ok()); + let native_token = VersionedAssetId::from(AssetId(Here.into())); + let lower_version_native_token = native_token.into_version(XCM_VERSION - 1).unwrap(); + let execution_fees = runtime_api + .query_weight_to_asset_fee(H256::zero(), xcm_weight.unwrap(), lower_version_native_token) + .unwrap(); + assert!(execution_fees.is_ok()); +} diff --git a/polkadot/xcm/xcm-runtime-apis/tests/mock.rs b/polkadot/xcm/xcm-runtime-apis/tests/mock.rs index f0a5be908f69..fb5d1ae7c0e5 100644 --- a/polkadot/xcm/xcm-runtime-apis/tests/mock.rs +++ b/polkadot/xcm/xcm-runtime-apis/tests/mock.rs @@ -453,7 +453,8 @@ sp_api::mock_impl_runtime_apis! 
{ } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { - match asset.try_as::() { + let latest_asset_id: Result = asset.clone().try_into(); + match latest_asset_id { Ok(asset_id) if asset_id.0 == HereLocation::get() => { Ok(WeightToFee::weight_to_fee(&weight)) }, diff --git a/polkadot/xcm/xcm-simulator/Cargo.toml b/polkadot/xcm/xcm-simulator/Cargo.toml index c7caa49393ed..47900e226d48 100644 --- a/polkadot/xcm/xcm-simulator/Cargo.toml +++ b/polkadot/xcm/xcm-simulator/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/xcm/xcm-simulator/example/Cargo.toml b/polkadot/xcm/xcm-simulator/example/Cargo.toml index e0aff9b7782a..43f36fc8991a 100644 --- a/polkadot/xcm/xcm-simulator/example/Cargo.toml +++ b/polkadot/xcm/xcm-simulator/example/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true version = "7.0.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -48,4 +50,5 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm/runtime-benchmarks", ] diff --git a/polkadot/xcm/xcm-simulator/example/src/tests.rs b/polkadot/xcm/xcm-simulator/example/src/tests.rs index bbac44ed8a1f..f971812f4f4d 100644 --- a/polkadot/xcm/xcm-simulator/example/src/tests.rs +++ b/polkadot/xcm/xcm-simulator/example/src/tests.rs @@ -47,6 +47,7 @@ fn dmp() { Xcm(vec![Transact { origin_kind: OriginKind::SovereignAccount, call: remark.encode().into(), + fallback_max_weight: None, }]), )); }); @@ -74,6 +75,7 @@ fn ump() { Xcm(vec![Transact { origin_kind: OriginKind::SovereignAccount, call: remark.encode().into(), + fallback_max_weight: None, }]), )); }); @@ -101,6 +103,7 @@ fn xcmp() { Xcm(vec![Transact { origin_kind: OriginKind::SovereignAccount, call: remark.encode().into(), + fallback_max_weight: None, }]), )); }); @@ -388,6 +391,7 @@ fn reserve_asset_class_create_and_reserve_transfer() { ) .encode() .into(), + fallback_max_weight: None, }]); // Send creation. 
assert_ok!(RelayChainPalletXcm::send_xcm(Here, Parachain(1), message)); diff --git a/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml b/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml index 04f8ba115173..a2e36db95ba6 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml +++ b/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml @@ -59,6 +59,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm/runtime-benchmarks", ] [[bin]] diff --git a/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.toml b/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.toml index 9b3576eaa3c2..046d707cc1e8 100644 --- a/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.toml +++ b/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.toml @@ -37,4 +37,4 @@ onboard_as_parachain = false [parachains.collator] name = "collator2000" command = "polkadot-parachain" - args = [ "-lparachain=debug" ] + args = [ "-lparachain=debug", "--experimental-use-slot-based" ] diff --git a/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.zndsl b/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.zndsl index 7ba896e1c903..0cfc29f532d1 100644 --- a/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.zndsl +++ b/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.zndsl @@ -12,7 +12,7 @@ validator: parachain 2000 block height is at least 10 within 200 seconds # Register the second core assigned to this parachain. alice: js-script ./assign-core.js with "0,2000,57600" return is 0 within 600 seconds -alice: js-script ./assign-core.js with "0,2000,57600" return is 0 within 600 seconds +alice: js-script ./assign-core.js with "1,2000,57600" return is 0 within 600 seconds validator: reports substrate_block_height{status="finalized"} is at least 35 within 100 seconds diff --git a/polkadot/zombienet_tests/functional/0018-shared-core-idle-parachain.toml b/polkadot/zombienet_tests/functional/0018-shared-core-idle-parachain.toml index 745c4f9e24b1..d3ff00002242 100644 --- a/polkadot/zombienet_tests/functional/0018-shared-core-idle-parachain.toml +++ b/polkadot/zombienet_tests/functional/0018-shared-core-idle-parachain.toml @@ -36,4 +36,4 @@ chain = "glutton-westend-local-2000" name = "collator-2000" image = "{{CUMULUS_IMAGE}}" command = "polkadot-parachain" - args = ["-lparachain=debug"] + args = ["-lparachain=debug", "--experimental-use-slot-based"] diff --git a/polkadot/zombienet_tests/functional/0019-coretime-collation-fetching-fairness.toml b/polkadot/zombienet_tests/functional/0019-coretime-collation-fetching-fairness.toml new file mode 100644 index 000000000000..43f3ef8f9e55 --- /dev/null +++ b/polkadot/zombienet_tests/functional/0019-coretime-collation-fetching-fairness.toml @@ -0,0 +1,58 @@ +[settings] +timeout = 1000 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.async_backing_params] + max_candidate_depth = 3 + allowed_ancestry_len = 2 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] + max_validators_per_core = 4 + num_cores = 1 + lookahead = 2 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.approval_voting_params] + needed_approvals = 3 + +[relaychain] +default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" +chain = "rococo-local" 
+command = "polkadot" + + [[relaychain.node_groups]] + name = "validator" + args = ["-lparachain=debug,parachain::collator-protocol=trace" ] + count = 4 + +[[parachains]] +id = 2000 +register_para = false +onboard_as_parachain = false +add_to_genesis = false +chain = "glutton-westend-local-2000" + [parachains.genesis.runtimeGenesis.patch.glutton] + compute = "50000000" + storage = "2500000000" + trashDataCount = 5120 + + [parachains.collator] + name = "collator-2000" + image = "{{CUMULUS_IMAGE}}" + command = "polkadot-parachain" + args = ["-lparachain=debug,parachain::collator-protocol=trace", "--experimental-use-slot-based"] + +[[parachains]] +id = 2001 +register_para = false +onboard_as_parachain = false +add_to_genesis = false +chain = "glutton-westend-local-2001" + [parachains.genesis.runtimeGenesis.patch.glutton] + compute = "50000000" + storage = "2500000000" + trashDataCount = 5120 + + [parachains.collator] + name = "collator-2001" + image = "{{CUMULUS_IMAGE}}" + command = "polkadot-parachain" + args = ["-lparachain=debug"] diff --git a/polkadot/zombienet_tests/functional/0019-coretime-collation-fetching-fairness.zndsl b/polkadot/zombienet_tests/functional/0019-coretime-collation-fetching-fairness.zndsl new file mode 100644 index 000000000000..8892b03ac29c --- /dev/null +++ b/polkadot/zombienet_tests/functional/0019-coretime-collation-fetching-fairness.zndsl @@ -0,0 +1,16 @@ +Description: CT shared core fairness test +Network: ./0019-coretime-collation-fetching-fairness.toml +Creds: config + +validator: reports node_roles is 4 + +validator-0: js-script ./force-register-paras.js with "2000,2001" return is 0 within 600 seconds +# core 0 is shared 3:1 between paras +validator-0: js-script ./assign-core.js with "0,2000,43200,2001,14400" return is 0 within 600 seconds + +collator-2000: reports block height is at least 9 within 200 seconds +collator-2001: reports block height is at least 3 within 10 seconds + +# hardcoded check to verify that included onchain events are indeed 3:1 +validator-0: js-script ./0019-verify-included-events.js return is 1 within 120 seconds + diff --git a/polkadot/zombienet_tests/functional/0019-verify-included-events.js b/polkadot/zombienet_tests/functional/0019-verify-included-events.js new file mode 100644 index 000000000000..6557a5a80e6b --- /dev/null +++ b/polkadot/zombienet_tests/functional/0019-verify-included-events.js @@ -0,0 +1,51 @@ +function parse_pjs_int(input) { + return parseInt(input.replace(/,/g, '')); +} + +async function run(nodeName, networkInfo) { + const { wsUri, userDefinedTypes } = networkInfo.nodesByName[nodeName]; + const api = await zombie.connect(wsUri, userDefinedTypes); + + let blocks_per_para = {}; + + await new Promise(async (resolve, _) => { + let block_count = 0; + const unsubscribe = await api.query.system.events(async (events, block_hash) => { + block_count++; + + events.forEach((record) => { + const event = record.event; + + if (event.method != 'CandidateIncluded') { + return; + } + + let included_para_id = parse_pjs_int(event.toHuman().data[0].descriptor.paraId); + let relay_parent = event.toHuman().data[0].descriptor.relayParent; + if (blocks_per_para[included_para_id] == undefined) { + blocks_per_para[included_para_id] = 1; + } else { + blocks_per_para[included_para_id]++; + } + console.log(`CandidateIncluded for ${included_para_id}: block_offset=${block_count} relay_parent=${relay_parent}`); + }); + + if (block_count == 12) { + unsubscribe(); + return resolve(); + } + }); + }); + + console.log(`Result: 2000: 
${blocks_per_para[2000]}, 2001: ${blocks_per_para[2001]}`); + // This check assumes that para 2000 runs a slot-based collator which respects its claim queue + // and para 2001 runs the lookahead collator, which generates blocks for each relay parent. + // + // For 12 blocks there will be one session change. One block won't have anything backed/included. + // In the next one there will be one backed, so for 12 blocks we should expect 10 included events - no + // more than 4 for para 2001 and at least 6 for para 2000. This should also cover the unlucky + // case when we observe two session changes during the 12-block period. + return (blocks_per_para[2000] >= 6) && (blocks_per_para[2001] <= 4); +} + +module.exports = { run }; diff --git a/prdoc/pr_4834.prdoc b/prdoc/pr_4834.prdoc new file mode 100644 index 000000000000..b7c8b15cb073 --- /dev/null +++ b/prdoc/pr_4834.prdoc @@ -0,0 +1,15 @@ +title: "xcm-executor: take delivery fee from transferred assets if necessary" + +doc: + - audience: Runtime Dev + description: | + In asset transfers, as a last resort, XCM delivery fees are taken from + transferred assets rather than failing the transfer. + +crates: + - name: staging-xcm-executor + bump: patch + - name: snowbridge-router-primitives + bump: patch + - name: snowbridge-pallet-inbound-queue + bump: patch diff --git a/prdoc/pr_4880.prdoc b/prdoc/pr_4880.prdoc new file mode 100644 index 000000000000..1bcd09088b5f --- /dev/null +++ b/prdoc/pr_4880.prdoc @@ -0,0 +1,31 @@ +title: Collation fetching fairness in collator protocol + +doc: + - audience: "Node Dev" + description: | + Implements collation fetching fairness on the validator side of the collator protocol. With + coretime in place, if two (or more) parachains share a single core, no fairness was guaranteed + between them in terms of collation fetching. The previous implementation accepted up to + `max_candidate_depth + 1` seconded collations per relay parent, and once this limit was reached + no new collations were accepted. A misbehaving collator could abuse this fact and prevent other + collators/parachains from advertising collations by advertising `max_candidate_depth + 1` + collations of its own. + To address this issue, two changes are made: + 1. For each parachain id, the validator accepts advertisements until the number of seconded + candidates equals the number of entries in the claim queue. + 2. When a new collation should be fetched, the validator inspects what has been seconded so far + and what is in the claim queue, and picks the first slot which has neither a seconded + collation nor a candidate pending seconding. If there is an advertisement in the + waiting queue for that slot, it is fetched; otherwise the next free slot is picked. + These two changes guarantee that: + 1. The validator doesn't accept more collations than it can actually back. + 2. Each parachain gets a fair share of core time, based on its allocations in the claim + queue. + +crates: + - name: polkadot-collator-protocol + bump: patch + - name: polkadot + bump: patch + - name: polkadot-node-subsystem-util + bump: minor \ No newline at end of file diff --git a/prdoc/pr_5703.prdoc b/prdoc/pr_5703.prdoc new file mode 100644 index 000000000000..3cef4468a87d --- /dev/null +++ b/prdoc/pr_5703.prdoc @@ -0,0 +1,13 @@ +title: Properly handle block gap created by fast sync + +doc: + - audience: Node Dev + description: | + Implements support for handling block gaps generated during fast sync. This includes managing the creation, + updating, and removal of block gaps.
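As a rough illustration of the gap bookkeeping described above (the `BlockGap` type and update rule below are simplified stand-ins, not sc-client-db's actual representation):

```rust
/// Simplified stand-in for a fast-sync block gap: the contiguous range of
/// block bodies that still has to be downloaded after the header chain and
/// latest state were imported.
#[derive(Debug, Clone, Copy, PartialEq)]
struct BlockGap {
    start: u32, // first missing block
    end: u32,   // last missing block
}

/// Shrink the gap as old blocks are backfilled; returning `None` removes it.
fn on_block_imported(gap: BlockGap, imported: u32) -> Option<BlockGap> {
    if imported != gap.start {
        return Some(gap); // not a gap-filling import, keep the gap as-is
    }
    (imported < gap.end).then(|| BlockGap { start: imported + 1, end: gap.end })
}

fn main() {
    let gap = BlockGap { start: 1, end: 3 };
    let gap = on_block_imported(gap, 1).unwrap();
    assert_eq!(gap, BlockGap { start: 2, end: 3 });
    // Importing the last missing block removes the gap entirely.
    assert_eq!(on_block_imported(BlockGap { start: 3, end: 3 }, 3), None);
}
```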
+      Note that this feature is not fully activated until the `body` attribute is removed from the `LightState` + block request in chain sync, which will occur after issue #5406 is resolved. + +crates: + - name: sc-client-db + bump: patch diff --git a/prdoc/pr_5723.prdoc b/prdoc/pr_5723.prdoc new file mode 100644 index 000000000000..ded5f9cebd1d --- /dev/null +++ b/prdoc/pr_5723.prdoc @@ -0,0 +1,24 @@ +title: Adds `BlockNumberProvider` in multisig, proxy and nft pallets + +doc: + - audience: Runtime Dev + description: | + This PR adds the ability for these pallets to specify their source of the block number. + This is useful when these pallets are migrated from the relay chain to a parachain and + vice versa. + + This change is backwards compatible when: + 1. the `BlockNumberProvider` continues to use the system pallet's block number, or + 2. a pallet deployed on the relay chain is moved to a parachain but still uses the + relay chain's block number. + + However, we would need migrations if the deployed pallets are upgraded on an existing parachain, + and the `BlockNumberProvider` uses the relay chain block number. + +crates: + - name: pallet-multisig + bump: major + - name: pallet-proxy + bump: major + - name: pallet-nfts + bump: major diff --git a/prdoc/pr_5732.prdoc b/prdoc/pr_5732.prdoc new file mode 100644 index 000000000000..6f3f9b8a1668 --- /dev/null +++ b/prdoc/pr_5732.prdoc @@ -0,0 +1,29 @@ +title: Expose the unstable metadata v16 +doc: +- audience: Node Dev + description: | + This PR exposes the *unstable* metadata V16. The metadata is exposed under the unstable u32::MAX number. + Developers can start experimenting with the new features of the metadata v16. *Please note that this metadata is under development; expect breaking changes until stabilization.* + The `ExtrinsicMetadata` trait receives a breaking change. Its associated type `VERSION` is renamed to `VERSIONS` and now supports a constant static list of metadata versions. + The versions implemented for `UncheckedExtrinsic` are v4 (legacy version) and v5 (new version). + For metadata collection, it is assumed that all `TransactionExtensions` are under version 0. + +crates: + - name: sp-metadata-ir + bump: major + - name: frame-support-procedural + bump: patch + - name: frame-support + bump: minor + - name: frame-support-test + bump: major + - name: frame-metadata-hash-extension + bump: patch + - name: substrate-wasm-builder + bump: minor + - name: pallet-revive + bump: minor + - name: sp-runtime + bump: major + - name: frame-benchmarking-cli + bump: patch diff --git a/prdoc/pr_5855.prdoc b/prdoc/pr_5855.prdoc new file mode 100644 index 000000000000..7735cfee9f37 --- /dev/null +++ b/prdoc/pr_5855.prdoc @@ -0,0 +1,15 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Remove feature `test-helpers` from sc-service + +doc: + - audience: Node Dev + description: | + Removes feature `test-helpers` from sc-service.
+ +crates: + - name: sc-service + bump: major + - name: sc-rpc-spec-v2 + bump: major diff --git a/prdoc/pr_5899.prdoc b/prdoc/pr_5899.prdoc new file mode 100644 index 000000000000..fef810dd5f20 --- /dev/null +++ b/prdoc/pr_5899.prdoc @@ -0,0 +1,52 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Remove usage of AccountKeyring" + +doc: + - audience: Runtime Dev + description: | + Compared with AccountKeyring, Sr25519Keyring and Ed25519Keyring are more intuitive. + When both Sr25519Keyring and Ed25519Keyring are required, using AccountKeyring brings confusion. + There are two AccountKeyring definitions; it would become even more complex if frame exported both of them. + +crates: + - name: frame-support + bump: patch + - name: sp-keyring + bump: major + - name: sc-service + bump: patch + - name: sc-chain-spec + bump: patch + - name: sc-rpc + bump: patch + - name: sc-transaction-pool + bump: patch + - name: sc-rpc-spec-v2 + bump: patch + - name: polkadot-node-metrics + bump: patch + - name: substrate-frame-rpc-system + bump: patch + - name: westend-runtime + bump: patch + - name: polkadot-sdk-frame + bump: patch + - name: rococo-runtime + bump: patch + - name: sc-basic-authorship + bump: patch + - name: bridge-hub-test-utils + bump: patch + - name: sc-consensus-manual-seal + bump: patch + - name: snowbridge-pallet-inbound-queue + bump: patch + - name: snowbridge-runtime-test-common + bump: patch + - name: bridge-hub-rococo-runtime + bump: patch + - name: bridge-hub-westend-runtime + bump: patch + diff --git a/prdoc/pr_5997.prdoc b/prdoc/pr_5997.prdoc new file mode 100644 index 000000000000..6bac36a44586 --- /dev/null +++ b/prdoc/pr_5997.prdoc @@ -0,0 +1,18 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Implement archive_unstable_storageDiff method + +doc: + - audience: Node Dev + description: | + This PR implements the `archive_unstable_storageDiff` rpc-v2 method. + Developers can use this method to fetch the storage differences + between two blocks. This is useful for oracles and archive nodes. + For more details see: https://github.com/paritytech/json-rpc-interface-spec/blob/main/src/api/archive_unstable_storageDiff.md. + +crates: + - name: sc-rpc-spec-v2 + bump: major + - name: sc-service + bump: patch diff --git a/prdoc/pr_6248.prdoc b/prdoc/pr_6248.prdoc new file mode 100644 index 000000000000..71fb0891cac6 --- /dev/null +++ b/prdoc/pr_6248.prdoc @@ -0,0 +1,16 @@ +title: Upgrade libp2p to 0.54.1 + +doc: + - audience: [Node Dev, Node Operator] + description: | + Upgrade libp2p from 0.52.4 to 0.54.1 + +crates: + - name: sc-network + bump: major + - name: sc-network-types + bump: minor + - name: sc-network-sync + bump: patch + - name: sc-telemetry + bump: minor diff --git a/prdoc/pr_6405.prdoc b/prdoc/pr_6405.prdoc new file mode 100644 index 000000000000..9e4e0b3c6c20 --- /dev/null +++ b/prdoc/pr_6405.prdoc @@ -0,0 +1,9 @@ +title: '`fatxpool`: handling limits and priorities improvements' +doc: +- audience: Node Dev + description: |- + This PR provides a number of improvements and fixes around handling limits and priorities in the fork-aware transaction pool.
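As a generic illustration of the kind of limits-and-priorities handling this touches (a toy model, not the actual fork-aware pool's data structures or eviction policy):

```rust
use std::collections::BTreeMap;

/// Keep the pool under a size limit by evicting the lowest-priority
/// transaction first. Priorities are assumed unique for simplicity.
fn submit(
    pool: &mut BTreeMap<u64, &'static str>,
    limit: usize,
    prio: u64,
    tx: &'static str,
) -> bool {
    if pool.len() == limit {
        match pool.keys().next().copied() {
            // The incoming transaction loses against everything already there.
            Some(lowest) if prio <= lowest => return false,
            Some(lowest) => {
                pool.remove(&lowest);
            },
            None => {},
        }
    }
    pool.insert(prio, tx);
    true
}

fn main() {
    let mut pool = BTreeMap::new();
    assert!(submit(&mut pool, 2, 10, "a"));
    assert!(submit(&mut pool, 2, 30, "b"));
    assert!(submit(&mut pool, 2, 20, "c")); // evicts priority 10
    assert!(!submit(&mut pool, 2, 5, "d")); // below everything, rejected
    assert_eq!(pool.len(), 2);
}
```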
+ +crates: +- name: sc-transaction-pool + bump: major diff --git a/prdoc/pr_6419.prdoc b/prdoc/pr_6419.prdoc new file mode 100644 index 000000000000..6cc155d64b91 --- /dev/null +++ b/prdoc/pr_6419.prdoc @@ -0,0 +1,12 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Use the custom target riscv32emac-unknown-none-polkavm +doc: + - audience: Runtime Dev + description: | + Closes: https://github.com/paritytech/polkadot-sdk/issues/6335 + +crates: +- name: substrate-wasm-builder + bump: patch diff --git a/prdoc/pr_6450.prdoc b/prdoc/pr_6450.prdoc new file mode 100644 index 000000000000..a9e927e45106 --- /dev/null +++ b/prdoc/pr_6450.prdoc @@ -0,0 +1,21 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Add omni-node checks for runtime parachain compatibility + +doc: + - audience: [ Node Dev, Runtime Dev ] + description: | + OmniNode parses runtime metadata and checks for the existence of `cumulus-pallet-parachain-system` + and `frame-system`, by filtering pallets by name: `ParachainSystem` and `System`. It also checks the + `frame-system` pallet storage `Number` type, and then uses it to configure AURA if `u32` or `u64`. + +crates: + - name: polkadot-omni-node-lib + bump: minor + - name: polkadot-sdk + bump: minor + - name: sc-runtime-utilities + bump: patch + - name: frame-benchmarking-cli + bump: major diff --git a/prdoc/pr_6459.prdoc b/prdoc/pr_6459.prdoc new file mode 100644 index 000000000000..592ba4c6b29d --- /dev/null +++ b/prdoc/pr_6459.prdoc @@ -0,0 +1,22 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Fix version conversion in XcmPaymentApi::query_weight_to_asset_fee. + +doc: + - audience: Runtime Dev + description: | + The `query_weight_to_asset_fee` function of the `XcmPaymentApi` was trying + to convert versions in the wrong way. + This resulted in all calls made with lower versions failing. + The version conversion is now done correctly and these same calls will now succeed. + +crates: + - name: asset-hub-westend-runtime + bump: patch + - name: asset-hub-rococo-runtime + bump: patch + - name: xcm-runtime-apis + bump: patch + - name: assets-common + bump: patch diff --git a/prdoc/pr_6461.prdoc b/prdoc/pr_6461.prdoc new file mode 100644 index 000000000000..1b3d1e8b0364 --- /dev/null +++ b/prdoc/pr_6461.prdoc @@ -0,0 +1,12 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json +title: '[pallet-revive] add support for all eth tx types' +doc: +- audience: Runtime Dev + description: Add support for 1559, 4844, and 2930 transaction types +crates: +- name: pallet-revive-eth-rpc + bump: minor +- name: pallet-revive + bump: minor + diff --git a/prdoc/pr_6481.prdoc b/prdoc/pr_6481.prdoc new file mode 100644 index 000000000000..83ba0a32eb24 --- /dev/null +++ b/prdoc/pr_6481.prdoc @@ -0,0 +1,10 @@ +title: 'slot-based-collator: Implement dedicated block import' +doc: +- audience: Node Dev + description: |- + The `SlotBasedBlockImport`'s job is to collect the storage proofs of all blocks getting imported. These storage proofs, alongside the block, are forwarded to the collation task. Right now they are just being thrown away.
More logic will follow later. Basically, this will be required to include multiple blocks in one `PoV`, which will then be done by the collation task. +crates: +- name: cumulus-client-consensus-aura + bump: major +- name: polkadot-omni-node-lib + bump: major diff --git a/prdoc/pr_6503.prdoc b/prdoc/pr_6503.prdoc new file mode 100644 index 000000000000..dc296a93f0eb --- /dev/null +++ b/prdoc/pr_6503.prdoc @@ -0,0 +1,10 @@ +title: "xcm: minor fix for compatibility with V4" + +doc: + - audience: ["Runtime Dev", "Runtime User"] + description: | + Following the removal of `Rococo`, `Westend` and `Wococo` from `NetworkId`, the `xcm::v5::NetworkId` encoding/decoding was fixed to be compatible with `xcm::v4::NetworkId`. + +crates: +- name: staging-xcm + bump: patch diff --git a/prdoc/pr_6505.prdoc b/prdoc/pr_6505.prdoc new file mode 100644 index 000000000000..ae00dd17fed5 --- /dev/null +++ b/prdoc/pr_6505.prdoc @@ -0,0 +1,14 @@ +title: '[pallet-broker] Fix auto renew benchmarks' +doc: +- audience: Runtime Dev + description: |- + Fix the broker pallet auto-renew benchmarks which have been broken since #4424, yielding `Weightless` due to some prices being set too low, as reported in #6474. + + Upon further investigation, it turned out that the auto-renew contribution to `rotate_sale` was always failing but the error was mapped. This is also fixed at the cost of a bit of setup overhead. +crates: +- name: pallet-broker + bump: patch +- name: coretime-rococo-runtime + bump: patch +- name: coretime-westend-runtime + bump: patch diff --git a/prdoc/pr_6506.prdoc b/prdoc/pr_6506.prdoc new file mode 100644 index 000000000000..7c6164a9959a --- /dev/null +++ b/prdoc/pr_6506.prdoc @@ -0,0 +1,10 @@ +title: Zero refund check for FungibleAdapter +doc: +- audience: Runtime User + description: |- + `FungibleAdapter` will now check if the _refund amount_ is zero before calling deposit and emitting an event. + + Fixes https://github.com/paritytech/polkadot-sdk/issues/6469. +crates: +- name: pallet-transaction-payment + bump: patch diff --git a/prdoc/pr_6509.prdoc b/prdoc/pr_6509.prdoc new file mode 100644 index 000000000000..74215fe0084c --- /dev/null +++ b/prdoc/pr_6509.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Migrate pallet-democracy benchmark to v2 + +doc: + - audience: Runtime Dev + description: | + "Part of issue #6202." + +crates: +- name: pallet-democracy + bump: patch diff --git a/prdoc/pr_6521.prdoc b/prdoc/pr_6521.prdoc new file mode 100644 index 000000000000..6f4acf8d028b --- /dev/null +++ b/prdoc/pr_6521.prdoc @@ -0,0 +1,10 @@ +title: Pure state sync refactoring (part-2) + +doc: +- audience: Node Dev + description: | + This is the last part of the pure refactoring of state sync, focusing on encapsulating `StateSyncMetadata` as a separate entity. + +crates: +- name: sc-network-sync + bump: none diff --git a/prdoc/pr_6522.prdoc b/prdoc/pr_6522.prdoc new file mode 100644 index 000000000000..bd59e9cb08dc --- /dev/null +++ b/prdoc/pr_6522.prdoc @@ -0,0 +1,18 @@ +title: Removes constraint in BlockNumberProvider from treasury + +doc: +- audience: Runtime Dev + description: |- + https://github.com/paritytech/polkadot-sdk/pull/3970 updated the treasury pallet to support a + relay chain block number provider.
However, it added a constraint to the `BlockNumberProvider` + trait to have the same block number type as `frame_system`: + + ```rust + type BlockNumberProvider: BlockNumberProvider<BlockNumber = BlockNumberFor<Self>>; + ``` + + This PR removes that constraint and allows the treasury pallet to use any block number type. + +crates: +- name: pallet-treasury + bump: major \ No newline at end of file diff --git a/prdoc/pr_6533.prdoc b/prdoc/pr_6533.prdoc new file mode 100644 index 000000000000..eb72a97db0f8 --- /dev/null +++ b/prdoc/pr_6533.prdoc @@ -0,0 +1,20 @@ +title: "Migrate executor into PolkaVM 0.18.0" +doc: + - audience: Runtime Dev + description: | + Bump `polkavm` to 0.18.0, and update `sc-polkavm-executor` to be + compatible with the API changes. In addition, also bump `polkavm-derive` + and `polkavm-linker` in order to make sure that all parts of the + Polkadot SDK use the exact same ABI for `.polkavm` binaries. + + Purely relying on the RV32E/RV64E ABI is not possible, as PolkaVM uses a + RISC-V-like ISA, which is derived from RV32E/RV64E but is still its + own microarchitecture, i.e. not fully binary compatible. + +crates: + - name: sc-executor-common + bump: major + - name: sc-executor-polkavm + bump: minor + - name: substrate-wasm-builder + bump: minor diff --git a/prdoc/pr_6534.prdoc b/prdoc/pr_6534.prdoc new file mode 100644 index 000000000000..7a92fe3c857b --- /dev/null +++ b/prdoc/pr_6534.prdoc @@ -0,0 +1,10 @@ +title: Forward logging directives to Polkadot workers +doc: +- audience: Node Dev + description: |- + This pull request forwards all the logging directives given to the node via `RUST_LOG` or `-l` to the workers, instead of only forwarding `RUST_LOG`. +crates: +- name: polkadot-node-core-pvf + bump: patch +- name: sc-tracing + bump: patch diff --git a/prdoc/pr_6536.prdoc b/prdoc/pr_6536.prdoc new file mode 100644 index 000000000000..676b5c131f17 --- /dev/null +++ b/prdoc/pr_6536.prdoc @@ -0,0 +1,24 @@ +title: Bridges testing improvements +doc: +- audience: Runtime Dev + description: |- + This PR includes: + - Refactored integrity tests to support standalone deployment of `pallet-bridge-messages`. + - Refactored the `open_and_close_bridge_works` test case to support multiple scenarios, such as: + 1. A local chain opening a bridge. + 2. Sibling parachains opening a bridge. + 3. The relay chain opening a bridge. + - Previously, we added instance support for `pallet-bridge-relayers` but overlooked updating the `DeliveryConfirmationPaymentsAdapter`. +crates: +- name: bridge-runtime-common + bump: patch +- name: pallet-bridge-relayers + bump: patch +- name: bridge-hub-rococo-runtime + bump: patch +- name: bridge-hub-westend-runtime + bump: patch +- name: bridge-hub-test-utils + bump: major +- name: parachains-runtimes-test-utils + bump: major diff --git a/prdoc/pr_6540.prdoc b/prdoc/pr_6540.prdoc new file mode 100644 index 000000000000..5e0305205521 --- /dev/null +++ b/prdoc/pr_6540.prdoc @@ -0,0 +1,16 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Only allow apply slash to be executed if the slash amount is at least ED + +doc: + - audience: Runtime User + description: | + This change prevents `pools::apply_slash` from being executed when the pending slash amount of the member is lower + than the ED. With this change, such small slashes will still be applied but only when member funds are withdrawn.
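The rule reduces to a simple threshold check; sketched below with illustrative names rather than the actual pallet-nomination-pools API:

```rust
/// Illustrative only: `apply_slash` is rejected for sub-ED amounts, and such
/// tiny slashes are instead realised when the member withdraws their funds.
fn can_apply_slash_now(pending_slash: u128, existential_deposit: u128) -> bool {
    pending_slash >= existential_deposit
}

fn main() {
    let ed = 1_000;
    assert!(can_apply_slash_now(2_500, ed));
    assert!(!can_apply_slash_now(999, ed)); // deferred until withdrawal
}
```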
+ +crates: +- name: pallet-nomination-pools-runtime-api + bump: patch +- name: pallet-nomination-pools + bump: major diff --git a/prdoc/pr_6544.prdoc b/prdoc/pr_6544.prdoc new file mode 100644 index 000000000000..f2bc9627697d --- /dev/null +++ b/prdoc/pr_6544.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Add and test events to conviction voting pallet + +doc: + - audience: Runtime Dev + description: | + Add event for the unlocking of an expired conviction vote's funds, and test recently added + voting events. + +crates: + - name: pallet-conviction-voting + bump: major diff --git a/prdoc/pr_6546.prdoc b/prdoc/pr_6546.prdoc new file mode 100644 index 000000000000..353578a7f58f --- /dev/null +++ b/prdoc/pr_6546.prdoc @@ -0,0 +1,13 @@ +title: Increase default trie cache size to 1GiB +doc: +- audience: Node Operator + description: "The default trie cache size before was set to `64MiB`, which is quite\ + \ low to achieve real speed ups. `1GiB` should be a reasonable number as the requirements\ + \ for validators/collators/full nodes are much higher when it comes to minimum\ + \ memory requirements. Also the cache will not use `1GiB` from the start and fills\ + \ over time. The setting can be changed by setting `--trie-cache-size BYTE_SIZE`.\ + The CLI option `--state-cache-size` is also removed, which was not having any effect anymore.\r\ + \n" +crates: +- name: sc-cli + bump: patch diff --git a/prdoc/pr_6549.prdoc b/prdoc/pr_6549.prdoc new file mode 100644 index 000000000000..61a64c724185 --- /dev/null +++ b/prdoc/pr_6549.prdoc @@ -0,0 +1,247 @@ +doc: [] + +crates: + - name: polkadot-sdk + bump: none + - name: asset-test-utils + bump: none + - name: cumulus-pallet-parachain-system + bump: none + - name: cumulus-pallet-parachain-system-proc-macro + bump: none + - name: cumulus-primitives-core + bump: none + - name: polkadot-core-primitives + bump: none + - name: polkadot-parachain-primitives + bump: none + - name: polkadot-primitives + bump: none + - name: staging-xcm + bump: none + - name: xcm-procedural + bump: none + - name: cumulus-primitives-parachain-inherent + bump: none + - name: cumulus-primitives-proof-size-hostfunction + bump: none + - name: polkadot-runtime-common + bump: none + - name: polkadot-runtime-parachains + bump: none + - name: polkadot-runtime-metrics + bump: none + - name: staging-xcm-executor + bump: none + - name: slot-range-helper + bump: none + - name: staging-xcm-builder + bump: none + - name: pallet-xcm + bump: none + - name: cumulus-primitives-storage-weight-reclaim + bump: none + - name: cumulus-pallet-aura-ext + bump: none + - name: cumulus-primitives-aura + bump: none + - name: staging-parachain-info + bump: none + - name: cumulus-test-relay-sproof-builder + bump: none + - name: cumulus-client-cli + bump: none + - name: cumulus-client-collator + bump: none + - name: cumulus-client-consensus-common + bump: none + - name: cumulus-client-pov-recovery + bump: none + - name: cumulus-relay-chain-interface + bump: none + - name: polkadot-overseer + bump: none + - name: tracing-gum + bump: none + - name: tracing-gum-proc-macro + bump: none + - name: polkadot-node-metrics + bump: none + - name: polkadot-node-primitives + bump: none + - name: polkadot-erasure-coding + bump: none + - name: polkadot-node-subsystem + bump: none + - name: polkadot-node-subsystem-types + bump: none + - name: polkadot-node-network-protocol + bump: none + - 
name: polkadot-statement-table + bump: none + - name: polkadot-rpc + bump: none + - name: polkadot-service + bump: none + - name: cumulus-client-parachain-inherent + bump: none + - name: westend-runtime + bump: none + - name: pallet-xcm-benchmarks + bump: none + - name: westend-runtime-constants + bump: none + - name: polkadot-approval-distribution + bump: none + - name: polkadot-node-subsystem-util + bump: none + - name: polkadot-availability-bitfield-distribution + bump: none + - name: polkadot-availability-distribution + bump: none + - name: polkadot-availability-recovery + bump: none + - name: polkadot-node-core-approval-voting + bump: none + - name: polkadot-node-core-approval-voting-parallel + bump: none + - name: polkadot-node-core-av-store + bump: none + - name: polkadot-node-core-chain-api + bump: none + - name: polkadot-statement-distribution + bump: none + - name: polkadot-collator-protocol + bump: none + - name: polkadot-dispute-distribution + bump: none + - name: polkadot-gossip-support + bump: none + - name: polkadot-network-bridge + bump: none + - name: polkadot-node-collation-generation + bump: none + - name: polkadot-node-core-backing + bump: none + - name: polkadot-node-core-bitfield-signing + bump: none + - name: polkadot-node-core-candidate-validation + bump: none + - name: polkadot-node-core-pvf + bump: none + - name: polkadot-node-core-pvf-common + bump: none + - name: polkadot-node-core-pvf-execute-worker + bump: none + - name: polkadot-node-core-pvf-prepare-worker + bump: none + - name: staging-tracking-allocator + bump: none + - name: rococo-runtime + bump: none + - name: rococo-runtime-constants + bump: none + - name: polkadot-node-core-chain-selection + bump: none + - name: polkadot-node-core-dispute-coordinator + bump: none + - name: polkadot-node-core-parachains-inherent + bump: none + - name: polkadot-node-core-prospective-parachains + bump: none + - name: polkadot-node-core-provisioner + bump: none + - name: polkadot-node-core-pvf-checker + bump: none + - name: polkadot-node-core-runtime-api + bump: none + - name: cumulus-client-network + bump: none + - name: cumulus-relay-chain-inprocess-interface + bump: none + - name: polkadot-cli + bump: none + - name: cumulus-client-consensus-aura + bump: none + - name: cumulus-client-consensus-proposer + bump: none + - name: cumulus-client-consensus-relay-chain + bump: none + - name: cumulus-client-service + bump: none + - name: cumulus-relay-chain-minimal-node + bump: none + - name: cumulus-relay-chain-rpc-interface + bump: none + - name: parachains-common + bump: none + - name: cumulus-primitives-utility + bump: none + - name: cumulus-pallet-xcmp-queue + bump: none + - name: parachains-runtimes-test-utils + bump: none + - name: assets-common + bump: none + - name: bridge-hub-common + bump: none + - name: bridge-hub-test-utils + bump: none + - name: cumulus-pallet-solo-to-para + bump: none + - name: cumulus-pallet-xcm + bump: none + - name: cumulus-ping + bump: none + - name: cumulus-primitives-timestamp + bump: none + - name: emulated-integration-tests-common + bump: none + - name: xcm-emulator + bump: none + - name: pallet-collective-content + bump: none + - name: xcm-simulator + bump: none + - name: pallet-revive-fixtures + bump: none + - name: polkadot-omni-node-lib + bump: none + - name: snowbridge-runtime-test-common + bump: none + - name: testnet-parachains-constants + bump: none + - name: asset-hub-rococo-runtime + bump: none + - name: asset-hub-westend-runtime + bump: none + - name: bridge-hub-rococo-runtime + 
bump: none + - name: bridge-hub-westend-runtime + bump: none + - name: collectives-westend-runtime + bump: none + - name: coretime-rococo-runtime + bump: none + - name: coretime-westend-runtime + bump: none + - name: people-rococo-runtime + bump: none + - name: people-westend-runtime + bump: none + - name: contracts-rococo-runtime + bump: none + - name: glutton-westend-runtime + bump: none + - name: rococo-parachain-runtime + bump: none + - name: polkadot-omni-node + bump: none + - name: polkadot-parachain-bin + bump: none + - name: polkadot + bump: none + - name: polkadot-voter-bags + bump: none + - name: xcm-simulator-example + bump: none diff --git a/prdoc/pr_6553.prdoc b/prdoc/pr_6553.prdoc new file mode 100644 index 000000000000..8692eba3a9f5 --- /dev/null +++ b/prdoc/pr_6553.prdoc @@ -0,0 +1,13 @@ +title: Ensure sync event is processed on unknown peer roles + +doc: + - audience: Node Dev + description: | + The GossipEngine::poll_next implementation polls both the notification_service and the sync_event_stream. + This PR ensures both events are processed gracefully. + +crates: + - name: sc-network-gossip + bump: patch + - name: sc-network-sync + bump: patch diff --git a/prdoc/pr_6561.prdoc b/prdoc/pr_6561.prdoc new file mode 100644 index 000000000000..714521925a6b --- /dev/null +++ b/prdoc/pr_6561.prdoc @@ -0,0 +1,11 @@ +title: 'slot-based-collator: Move spawning of the futures' +doc: +- audience: Node Dev + description: "Move spawning of the slot-based collator into the `run` function.\ + \ Also the tasks are being spawned as blocking task and not just as normal tasks.\r\ + \n" +crates: +- name: cumulus-client-consensus-aura + bump: major +- name: polkadot-omni-node-lib + bump: major diff --git a/prdoc/pr_6562.prdoc b/prdoc/pr_6562.prdoc new file mode 100644 index 000000000000..250b656aefb5 --- /dev/null +++ b/prdoc/pr_6562.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Hide nonce implementation details in metadata + +doc: + - audience: Runtime Dev + description: | + Use custom implementation of TypeInfo for TypeWithDefault to show inner value's type info. + This should bring back nonce to u64 in metadata. + +crates: +- name: sp-runtime + bump: minor \ No newline at end of file diff --git a/prdoc/pr_6565.prdoc b/prdoc/pr_6565.prdoc new file mode 100644 index 000000000000..f9a75a16a6a7 --- /dev/null +++ b/prdoc/pr_6565.prdoc @@ -0,0 +1,35 @@ +title: 'pallet_revive: Switch to 64bit RISC-V' +doc: +- audience: Runtime Dev + description: |- + This PR updates pallet_revive to the newest PolkaVM version and adapts the test fixtures and syscall interface to work under 64bit. + + Please note that after this PR no 32bit contracts can be deployed (they will be rejected at deploy time). Pre-deployed 32bit contracts are now considered defunct since we changes how parameters are passed for functions with more than 6 arguments. + + ## Fixtures + + The fixtures are now built for the 64bit target. I also removed the temporary directory mechanism that triggered a full rebuild every time. It also makes it easier to find the compiled fixtures since they are now always in `target/pallet-revive-fixtures`. + + ## Syscall interface + + ### Passing pointer + + Registers and pointers are now 64bit wide. This allows us to pass u64 arguments in a single register. Before we needed two registers to pass them. 
This means that, just as before, we need one register per pointer we pass. We keep pointers as `u32` arguments by truncating the register. This is done since the memory space of PolkaVM is 32bit. + + ### Functions with more than 6 arguments + + We only have 6 registers to pass arguments. This is why we pass a pointer to a struct when we need more than 6. Before this PR we expected a packed struct and interpreted it as a SCALE encoded tuple. However, this was buggy because the `MaxEncodedLen` returned something that was larger than the packed size of the structure. This wasn't a problem before. But now the memory space changed in a way that things were placed at the edges of the memory space and those extra bytes led to an out-of-bounds access. + + This is why this PR drops SCALE and expects the arguments to be passed as a pointer to a `C` aligned struct. This avoids unaligned accesses. However, revive needs to adapt its codegen to properly align the structure fields. + + ## TODO + - [ ] Add multi-block migration that wipes all existing contracts as we made breaking changes to the syscall interface +crates: +- name: pallet-revive + bump: major +- name: pallet-revive-fixtures + bump: major +- name: pallet-revive-proc-macro + bump: major +- name: pallet-revive-uapi + bump: major diff --git a/prdoc/pr_6566.prdoc b/prdoc/pr_6566.prdoc new file mode 100644 index 000000000000..bbd48b799538 --- /dev/null +++ b/prdoc/pr_6566.prdoc @@ -0,0 +1,45 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: XCMv5 - SetHints instruction + +doc: + - audience: Runtime Dev + description: | + Implementation of fellowship RFC 107. + The new SetHints instruction is a repackaging of SetAssetClaimer that also allows future + "hints" which alter the default behaviour of the executor. + The AllowTopLevelPaidExecutionFrom barrier allows this instruction between WithdrawAsset and + BuyExecution/PayFees to configure things before the actual meat of the program.
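As a rough illustration of that ordering, here is a self-contained sketch; the `Instruction` and `Hint` types below only mirror the shape of the real `staging-xcm` v5 types and are not its actual definitions:

```rust
// Stand-in types mirroring the instructions named above; NOT the real
// staging-xcm definitions.
#[derive(Debug)]
enum Hint {
    AssetClaimer { location: &'static str },
}

#[derive(Debug)]
enum Instruction {
    WithdrawAsset(&'static str),
    SetHints { hints: Vec<Hint> },
    PayFees { asset: &'static str },
}

fn main() {
    // Hints go after WithdrawAsset and before PayFees/BuyExecution, so the
    // program is configured before its "meat" executes.
    let program = vec![
        Instruction::WithdrawAsset("10 DOT"),
        Instruction::SetHints { hints: vec![Hint::AssetClaimer { location: "parent chain" }] },
        Instruction::PayFees { asset: "1 DOT" },
    ];
    println!("{program:?}");
}
```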
+ +crates: + - name: asset-hub-rococo-runtime + bump: major + - name: asset-hub-westend-runtime + bump: major + - name: bridge-hub-rococo-runtime + bump: major + - name: bridge-hub-westend-runtime + bump: major + - name: coretime-rococo-runtime + bump: major + - name: coretime-westend-runtime + bump: major + - name: people-rococo-runtime + bump: major + - name: people-westend-runtime + bump: major + - name: rococo-runtime + bump: major + - name: westend-runtime + bump: major + - name: pallet-xcm-benchmarks + bump: major + - name: xcm-procedural + bump: minor + - name: staging-xcm + bump: major + - name: staging-xcm-builder + bump: major + - name: staging-xcm-executor + bump: major diff --git a/prdoc/pr_6583.prdoc b/prdoc/pr_6583.prdoc new file mode 100644 index 000000000000..0e67ed33e27c --- /dev/null +++ b/prdoc/pr_6583.prdoc @@ -0,0 +1,7 @@ +title: Bump Westend AH +doc: +- audience: Runtime Dev + description: Bump Asset-Hub Westend spec version +crates: +- name: asset-hub-westend-runtime + bump: minor diff --git a/prdoc/pr_6588.prdoc b/prdoc/pr_6588.prdoc new file mode 100644 index 000000000000..bf44b2ed3784 --- /dev/null +++ b/prdoc/pr_6588.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "rpc server: fix subscription id_provider being reset to default one" + +doc: + - audience: Node Dev + description: | + The modification ensures that the id_provider variable is cloned instead of taken, which can help prevent issues related to the id provider being reset to the default. + + +crates: + - name: sc-rpc-server + bump: patch \ No newline at end of file diff --git a/prdoc/pr_6603.prdoc b/prdoc/pr_6603.prdoc new file mode 100644 index 000000000000..20c5e7294dfa --- /dev/null +++ b/prdoc/pr_6603.prdoc @@ -0,0 +1,16 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Always provide main protocol name in litep2p responses + +doc: + - audience: [ Node Dev, Node Operator ] + description: | + This PR aligns litep2p behavior with libp2p. Previously, the litep2p network backend + would provide the actual negotiated request-response protocol that produced a + response message. After this PR, only the main protocol name is reported to other + subsystems. + +crates: + - name: sc-network + bump: patch diff --git a/prdoc/pr_6604.prdoc b/prdoc/pr_6604.prdoc new file mode 100644 index 000000000000..dc198287ff67 --- /dev/null +++ b/prdoc/pr_6604.prdoc @@ -0,0 +1,106 @@ +title: 'dmp: Check that the para exists before delivering a message' +doc: +- audience: Runtime Dev + description: | + Ensure that a para exists before trying to deliver a message to it. + Besides that, an `ensure_successful_delivery` function is added to `SendXcm`. This function + should be used by benchmarks to ensure that the delivery of an XCM will work in the benchmark.
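A hedged sketch of what such a hook can look like; the names and exact signature below are assumptions for illustration, not the real `SendXcm` trait, which also deals with tickets and delivery fees:

```rust
// Stand-in types; illustration only.
struct Location;
struct Xcm;

trait SendXcmSketch {
    fn deliver(dest: &Location, msg: Xcm) -> Result<(), &'static str>;

    /// Benchmark-only hook: put the router into a state in which delivery to
    /// `dest` (when known) cannot fail, e.g. by opening channels or funding
    /// accounts.
    fn ensure_successful_delivery(dest: Option<&Location>);
}

struct NoopRouter;

impl SendXcmSketch for NoopRouter {
    fn deliver(_dest: &Location, _msg: Xcm) -> Result<(), &'static str> {
        Ok(())
    }

    fn ensure_successful_delivery(_dest: Option<&Location>) {} // nothing to set up
}

fn main() {
    NoopRouter::ensure_successful_delivery(Some(&Location)); // benchmark setup
    assert!(NoopRouter::deliver(&Location, Xcm).is_ok());
}
```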
+crates: +- name: polkadot-runtime-parachains + bump: major +- name: polkadot-runtime-common + bump: major +- name: polkadot-parachain-primitives + bump: major +- name: rococo-runtime + bump: major +- name: westend-runtime + bump: major +- name: pallet-xcm-benchmarks + bump: major +- name: pallet-xcm + bump: major +- name: cumulus-pallet-parachain-system + bump: major +- name: staging-xcm + bump: major +- name: staging-xcm-builder + bump: major +- name: bridge-runtime-common + bump: major +- name: pallet-xcm-bridge-hub-router + bump: major +- name: pallet-xcm-bridge-hub + bump: major +- name: snowbridge-pallet-inbound-queue + bump: major +- name: snowbridge-pallet-system + bump: major +- name: snowbridge-core + bump: major +- name: snowbridge-router-primitives + bump: major +- name: snowbridge-runtime-common + bump: major +- name: snowbridge-runtime-test-common + bump: major +- name: cumulus-pallet-dmp-queue + bump: major +- name: cumulus-pallet-xcmp-queue + bump: major +- name: parachains-common + bump: major +- name: asset-hub-rococo-runtime + bump: major +- name: asset-hub-westend-runtime + bump: major +- name: assets-common + bump: major +- name: bridge-hub-rococo-runtime + bump: major +- name: bridge-hub-westend-runtime + bump: major +- name: bridge-hub-common + bump: major +- name: collectives-westend-runtime + bump: major +- name: contracts-rococo-runtime + bump: major +- name: coretime-rococo-runtime + bump: major +- name: coretime-westend-runtime + bump: major +- name: glutton-westend-runtime + bump: major +- name: people-rococo-runtime + bump: major +- name: people-westend-runtime + bump: major +- name: penpal-runtime + bump: major +- name: rococo-parachain-runtime + bump: major +- name: polkadot-parachain-bin + bump: major +- name: cumulus-primitives-core + bump: major +- name: cumulus-primitives-utility + bump: major +- name: polkadot-service + bump: major +- name: staging-xcm-executor + bump: major +- name: xcm-runtime-apis + bump: major +- name: xcm-simulator-example + bump: major +- name: pallet-contracts + bump: major +- name: pallet-contracts-mock-network + bump: major +- name: pallet-revive + bump: major +- name: pallet-revive-mock-network + bump: major +- name: polkadot-sdk + bump: major diff --git a/prdoc/pr_6605.prdoc b/prdoc/pr_6605.prdoc new file mode 100644 index 000000000000..2adb1d8aee35 --- /dev/null +++ b/prdoc/pr_6605.prdoc @@ -0,0 +1,10 @@ +title: Notify telemetry only every second about the tx pool status +doc: +- audience: Node Operator + description: |- + Before, this was done for every imported transaction. When a lot of transactions got imported, the import notification channel was filled. The underlying problem was that the `status` call is read-locking the `validated_pool`, which will be write-locked by the internal submitting logic. Thus, the submitting and status reading were interfering with each other.
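A minimal sketch of the throttling idea described above (not the actual `sc-service` code): report at most once per second instead of on every import notification, which also keeps the status read-lock off the submitters' hot path.

```rust
use std::time::{Duration, Instant};

struct StatusThrottle {
    last: Instant,
}

impl StatusThrottle {
    /// Run `report` only if at least one second has elapsed since the last run.
    fn maybe_report(&mut self, report: impl FnOnce()) {
        if self.last.elapsed() >= Duration::from_secs(1) {
            report();
            self.last = Instant::now();
        }
    }
}

fn main() {
    let mut throttle = StatusThrottle { last: Instant::now() };
    for _tx in 0..1000 {
        // Called per imported transaction, but reports at most once a second.
        throttle.maybe_report(|| println!("telemetry: tx pool status"));
    }
}
```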
+crates: +- name: cumulus-client-service + bump: patch +- name: sc-service + bump: patch diff --git a/prdoc/pr_6608.prdoc b/prdoc/pr_6608.prdoc new file mode 100644 index 000000000000..b9cd7008de47 --- /dev/null +++ b/prdoc/pr_6608.prdoc @@ -0,0 +1,14 @@ +title: '[pallet-revive] eth-rpc fix geth diff' +doc: +- audience: Runtime Dev + description: |- + * Add a bunch of differential tests to ensure that responses from eth-rpc match those from `geth` + * The EVM RPC server will not fail gas_estimation if no gas is specified; I updated pallet-revive to add an extra `skip_transfer` boolean check to replicate this behavior in our pallet + * The `eth_transact` and `bare_eth_transact` APIs have been updated to use `GenericTransaction` directly as this is what is used by `eth_estimateGas` and `eth_call` +crates: +- name: pallet-revive-eth-rpc + bump: minor +- name: pallet-revive + bump: minor +- name: asset-hub-westend-runtime + bump: minor diff --git a/prdoc/pr_6624.prdoc b/prdoc/pr_6624.prdoc new file mode 100644 index 000000000000..4db55a46e8df --- /dev/null +++ b/prdoc/pr_6624.prdoc @@ -0,0 +1,11 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Use `cmd_lib` instead of `std::process::Command` when using `#[docify::export]` + +doc: + - audience: Runtime Dev + description: | + Simplified the display of commands and ensured they are tested for chain spec builder's `polkadot-sdk` reference docs. + +crates: [] \ No newline at end of file diff --git a/prdoc/pr_6628.prdoc b/prdoc/pr_6628.prdoc new file mode 100644 index 000000000000..7ea0c4968385 --- /dev/null +++ b/prdoc/pr_6628.prdoc @@ -0,0 +1,12 @@ +title: "Remove ReportCollator message" + +doc: + - audience: Node Dev + description: | + Remove the unused message ReportCollator and the test related to this message on the collator protocol validator side. + +crates: + - name: polkadot-node-subsystem-types + bump: patch + - name: polkadot-collator-protocol + bump: major \ No newline at end of file diff --git a/prdoc/pr_6636.prdoc b/prdoc/pr_6636.prdoc new file mode 100644 index 000000000000..1db5fd54d971 --- /dev/null +++ b/prdoc/pr_6636.prdoc @@ -0,0 +1,9 @@ +title: Optimize initialization of networking protocol benchmarks +doc: +- audience: Node Dev + description: |- + These changes should enhance the quality of benchmark results by excluding worker initialization time from the measurements and reducing the overall duration of the benchmarks. + +crates: +- name: sc-network + validate: false diff --git a/prdoc/pr_6643.prdoc b/prdoc/pr_6643.prdoc new file mode 100644 index 000000000000..c111f6356519 --- /dev/null +++ b/prdoc/pr_6643.prdoc @@ -0,0 +1,47 @@ +title: Added fallback_max_weight to Transact for sending messages to V4 chains +doc: +- audience: Runtime Dev + description: |- + Removing the `require_weight_at_most` parameter in V5 Transact introduced a problem when converting a message from V5 to V4 to send to chains that didn't upgrade yet. + The local chain doesn't know how to decode calls for remote chains so it can't automatically populate `require_weight_at_most` required by V4 Transact. + To fix this, XCM v5 Transact now also takes a `fallback_max_weight: Option<Weight>` parameter. + This can be set to `None` if the instruction is not meant to be sent to chains running XCM versions lower than V5. + If set to `Some(weight)`, a subsequent conversion to V4 will result in `Transact { require_weight_at_most: weight, .. }`.
+ The plan is to remove this workaround in V6 since there will be a good conversion path from V6 to V5. +crates: +- name: snowbridge-router-primitives + bump: major +- name: emulated-integration-tests-common + bump: major +- name: asset-hub-rococo-runtime + bump: major +- name: asset-hub-westend-runtime + bump: major +- name: asset-test-utils + bump: major +- name: bridge-hub-rococo-runtime + bump: major +- name: bridge-hub-westend-runtime + bump: major +- name: coretime-rococo-runtime + bump: major +- name: coretime-westend-runtime + bump: major +- name: people-rococo-runtime + bump: major +- name: people-westend-runtime + bump: major +- name: parachains-runtimes-test-utils + bump: major +- name: polkadot-runtime-parachains + bump: major +- name: rococo-runtime + bump: major +- name: westend-runtime + bump: major +- name: staging-xcm + bump: major +- name: staging-xcm-builder + bump: major +- name: staging-xcm-executor + bump: major diff --git a/prdoc/pr_6645.prdoc b/prdoc/pr_6645.prdoc new file mode 100644 index 000000000000..f033cadc0b6e --- /dev/null +++ b/prdoc/pr_6645.prdoc @@ -0,0 +1,14 @@ +title: 'xcm: fix local/remote exports when inner routers return `NotApplicable`' +doc: +- audience: Runtime Dev + description: |- + Resolved a bug in the `local/remote exporters` used for bridging. Previously, they consumed `dest` and `msg` without returning them when inner routers/exporters failed with `NotApplicable`. This PR ensures compliance with the [`SendXcm`](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/xcm/src/v5/traits.rs#L449-L450) and [`ExportXcm`](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/xcm/xcm-executor/src/traits/export.rs#L44-L45) traits. +crates: +- name: staging-xcm-builder + bump: patch +- name: polkadot + bump: none +- name: staging-xcm + bump: none +- name: staging-xcm-executor + bump: none diff --git a/prdoc/pr_6646.prdoc b/prdoc/pr_6646.prdoc new file mode 100644 index 000000000000..4dcda8d41bda --- /dev/null +++ b/prdoc/pr_6646.prdoc @@ -0,0 +1,19 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: OmniNode --dev flag starts node with manual seal + +doc: + - audience: [ Runtime Dev, Node Dev ] + description: | + The `polkadot-omni-node` lib now supports the `--dev` flag, also allowing a chain spec to be passed, + and starts the node with manual seal. It will seal a block every `dev_block_time` milliseconds, + which can be set via `--dev-block-time` and, if not set, will default to `3000ms`. + +crates: + - name: sc-cli + bump: patch + - name: polkadot-omni-node-lib + bump: patch + - name: polkadot-omni-node + bump: patch diff --git a/prdoc/pr_6652.prdoc b/prdoc/pr_6652.prdoc new file mode 100644 index 000000000000..a303311e138f --- /dev/null +++ b/prdoc/pr_6652.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "rpc server: re-use server builder per rpc interface" + +doc: + - audience: Node Dev + description: | + With this change, the RPC server builder is re-used for each RPC interface, which is more efficient than building it for every connection.
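An illustrative sketch of the hoisting described above, using placeholder types rather than the real jsonrpsee-based builder from `sc-rpc-server`:

```rust
// Placeholder type standing in for the per-interface RPC service builder.
#[derive(Clone)]
struct RpcService;

fn build_service() -> RpcService {
    println!("built once per RPC interface");
    RpcService
}

fn main() {
    // Hoisted out of the accept loop: build the service once...
    let service = build_service();
    for conn_id in 0..3 {
        // ...and hand each incoming connection a cheap clone.
        let per_conn = service.clone();
        let _ = (conn_id, per_conn);
    }
}
```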
+ +crates: + - name: sc-rpc-server + bump: patch diff --git a/prdoc/pr_6665.prdoc b/prdoc/pr_6665.prdoc new file mode 100644 index 000000000000..b5aaf8a3b184 --- /dev/null +++ b/prdoc/pr_6665.prdoc @@ -0,0 +1,15 @@ +title: Fix runtime api impl detection by construct runtime +doc: +- audience: Runtime Dev + description: |- + Construct runtime uses autoref-based specialization to fetch the metadata about the implemented runtime apis. This is done to not fail to compile when there are no runtime apis implemented. However, there was an issue with detecting runtime apis when they were implemented in a different file. The problem is solved by moving the trait implemented by `impl_runtime_apis!` to the metadata ir crate. + + + Closes: https://github.com/paritytech/polkadot-sdk/issues/6659 +crates: +- name: frame-support-procedural + bump: patch +- name: sp-api-proc-macro + bump: patch +- name: sp-metadata-ir + bump: patch diff --git a/prdoc/pr_6673.prdoc b/prdoc/pr_6673.prdoc new file mode 100644 index 000000000000..d2ca3c61ff39 --- /dev/null +++ b/prdoc/pr_6673.prdoc @@ -0,0 +1,7 @@ +title: 'chain-spec-guide-runtime: path to wasm blob fixed' +doc: +- audience: Runtime Dev + description: In `chain-spec-guide-runtime` crate's tests, there was an assumption that + a release version of the wasm blob exists. This PR uses the `chain_spec_guide_runtime::runtime::WASM_BINARY_PATH` + const to use the correct path to the runtime blob. +crates: [] diff --git a/prdoc/pr_6677.prdoc b/prdoc/pr_6677.prdoc new file mode 100644 index 000000000000..c6766889e68d --- /dev/null +++ b/prdoc/pr_6677.prdoc @@ -0,0 +1,11 @@ +title: 'chore: Update litep2p to v0.8.2' +doc: +- audience: Node Dev + description: |- + This includes a critical fix for debug release versions of litep2p (which are running in Kusama as validators). + + While at it, this stops the on-call pain of alerts around `incoming_connections_total`. We can rethink the metric exposure of litep2p in Q1. + +crates: +- name: sc-network + bump: minor diff --git a/prdoc/pr_6681.prdoc b/prdoc/pr_6681.prdoc new file mode 100644 index 000000000000..93a967d4a66c --- /dev/null +++ b/prdoc/pr_6681.prdoc @@ -0,0 +1,406 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: update scale-info to 2.11.6 + +doc: + - audience: Runtime Dev + description: | + Updates scale-info to 2.11.6 from 2.11.5.
+ Updated version of scale-info annotates generated code with `allow(deprecated)` + +crates: + - name: bridge-runtime-common + bump: none + - name: bp-header-chain + bump: none + - name: bp-runtime + bump: none + - name: frame-support + bump: none + - name: sp-core + bump: none + - name: sp-trie + bump: none + - name: sp-runtime + bump: none + - name: sp-application-crypto + bump: none + - name: sp-arithmetic + bump: none + - name: sp-weights + bump: none + - name: sp-api + bump: none + - name: sp-metadata-ir + bump: none + - name: sp-version + bump: none + - name: sp-inherents + bump: none + - name: frame-executive + bump: none + - name: frame-system + bump: none + - name: pallet-balances + bump: none + - name: frame-benchmarking + bump: none + - name: pallet-migrations + bump: none + - name: cumulus-pallet-parachain-system + bump: none + - name: cumulus-primitives-core + bump: none + - name: polkadot-core-primitives + bump: none + - name: polkadot-parachain-primitives + bump: none + - name: polkadot-primitives + bump: none + - name: sp-authority-discovery + bump: none + - name: sp-consensus-slots + bump: none + - name: sp-staking + bump: none + - name: staging-xcm + bump: none + - name: cumulus-primitives-parachain-inherent + bump: none + - name: pallet-message-queue + bump: none + - name: polkadot-runtime-common + bump: none + - name: frame-election-provider-support + bump: none + - name: sp-npos-elections + bump: none + - name: sp-consensus-grandpa + bump: none + - name: polkadot-primitives + bump: none + - name: sp-authority-discovery + bump: none + - name: sp-consensus-grandpa + bump: none + - name: sp-genesis-builder + bump: none + - name: sp-consensus-babe + bump: none + - name: sp-mixnet + bump: none + - name: sc-rpc-api + bump: none + - name: sp-session + bump: none + - name: sp-statement-store + bump: none + - name: sp-transaction-storage-proof + bump: none + - name: pallet-asset-rate + bump: none + - name: pallet-authorship + bump: none + - name: pallet-babe + bump: none + - name: pallet-session + bump: none + - name: pallet-timestamp + bump: none + - name: pallet-offences + bump: none + - name: pallet-staking + bump: none + - name: pallet-bags-list + bump: none + - name: pallet-broker + bump: none + - name: pallet-election-provider-multi-phase + bump: none + - name: pallet-fast-unstake + bump: none + - name: pallet-identity + bump: none + - name: pallet-transaction-payment + bump: none + - name: pallet-treasury + bump: none + - name: pallet-utility + bump: none + - name: pallet-collective + bump: none + - name: pallet-root-testing + bump: none + - name: pallet-vesting + bump: none + - name: polkadot-runtime-parachains + bump: none + - name: pallet-authority-discovery + bump: none + - name: pallet-mmr + bump: none + - name: sp-mmr-primitives + bump: none + - name: staging-xcm-executor + bump: none + - name: staging-xcm-builder + bump: none + - name: pallet-asset-conversion + bump: none + - name: pallet-assets + bump: none + - name: pallet-salary + bump: none + - name: pallet-ranked-collective + bump: none + - name: pallet-xcm + bump: none + - name: xcm-runtime-apis + bump: none + - name: pallet-grandpa + bump: none + - name: pallet-indices + bump: none + - name: pallet-sudo + bump: none + - name: sp-consensus-beefy + bump: none + - name: cumulus-primitives-storage-weight-reclaim + bump: none + - name: cumulus-pallet-aura-ext + bump: none + - name: pallet-aura + bump: none + - name: sp-consensus-aura + bump: none + - name: pallet-collator-selection + bump: none + - name: 
pallet-glutton + bump: none + - name: staging-parachain-info + bump: none + - name: westend-runtime + bump: none + - name: frame-metadata-hash-extension + bump: none + - name: frame-system-benchmarking + bump: none + - name: pallet-beefy + bump: none + - name: pallet-beefy-mmr + bump: none + - name: pallet-conviction-voting + bump: none + - name: pallet-scheduler + bump: none + - name: pallet-preimage + bump: none + - name: pallet-delegated-staking + bump: none + - name: pallet-nomination-pools + bump: none + - name: pallet-democracy + bump: none + - name: pallet-elections-phragmen + bump: none + - name: pallet-membership + bump: none + - name: pallet-multisig + bump: none + - name: polkadot-sdk-frame + bump: none + - name: pallet-dev-mode + bump: none + - name: pallet-verify-signature + bump: none + - name: pallet-nomination-pools-benchmarking + bump: none + - name: pallet-offences-benchmarking + bump: none + - name: pallet-im-online + bump: none + - name: pallet-parameters + bump: none + - name: pallet-proxy + bump: none + - name: pallet-recovery + bump: none + - name: pallet-referenda + bump: none + - name: pallet-society + bump: none + - name: pallet-state-trie-migration + bump: none + - name: pallet-whitelist + bump: none + - name: pallet-xcm-benchmarks + bump: none + - name: rococo-runtime + bump: none + - name: pallet-bounties + bump: none + - name: pallet-child-bounties + bump: none + - name: pallet-nis + bump: none + - name: pallet-tips + bump: none + - name: parachains-common + bump: none + - name: pallet-asset-tx-payment + bump: none + - name: cumulus-pallet-xcmp-queue + bump: none + - name: bp-xcm-bridge-hub-router + bump: none + - name: pallet-xcm-bridge-hub-router + bump: none + - name: assets-common + bump: none + - name: bp-messages + bump: none + - name: bp-parachains + bump: none + - name: bp-polkadot-core + bump: none + - name: bp-relayers + bump: none + - name: bp-xcm-bridge-hub + bump: none + - name: bridge-hub-common + bump: none + - name: snowbridge-core + bump: none + - name: snowbridge-beacon-primitives + bump: none + - name: snowbridge-ethereum + bump: none + - name: pallet-bridge-grandpa + bump: none + - name: pallet-bridge-messages + bump: none + - name: pallet-bridge-parachains + bump: none + - name: pallet-bridge-relayers + bump: none + - name: pallet-xcm-bridge-hub + bump: none + - name: cumulus-pallet-dmp-queue + bump: none + - name: cumulus-pallet-solo-to-para + bump: none + - name: cumulus-pallet-xcm + bump: none + - name: cumulus-ping + bump: none + - name: frame-benchmarking-pallet-pov + bump: none + - name: pallet-alliance + bump: none + - name: pallet-asset-conversion-ops + bump: none + - name: pallet-asset-conversion-tx-payment + bump: none + - name: pallet-assets-freezer + bump: none + - name: pallet-atomic-swap + bump: none + - name: pallet-collective-content + bump: none + - name: pallet-contracts + bump: none + - name: pallet-contracts-uapi + bump: none + - name: pallet-insecure-randomness-collective-flip + bump: none + - name: pallet-contracts-mock-network + bump: none + - name: xcm-simulator + bump: none + - name: pallet-core-fellowship + bump: none + - name: pallet-lottery + bump: none + - name: pallet-mixnet + bump: none + - name: pallet-nft-fractionalization + bump: none + - name: pallet-nfts + bump: none + - name: pallet-node-authorization + bump: none + - name: pallet-paged-list + bump: none + - name: pallet-remark + bump: none + - name: pallet-revive + bump: none + - name: pallet-revive-uapi + bump: none + - name: pallet-revive-eth-rpc + 
bump: none + - name: pallet-skip-feeless-payment + bump: none + - name: pallet-revive-mock-network + bump: none + - name: pallet-root-offences + bump: none + - name: pallet-safe-mode + bump: none + - name: pallet-scored-pool + bump: none + - name: pallet-statement + bump: none + - name: pallet-transaction-storage + bump: none + - name: pallet-tx-pause + bump: none + - name: pallet-uniques + bump: none + - name: snowbridge-outbound-queue-merkle-tree + bump: none + - name: snowbridge-pallet-ethereum-client + bump: none + - name: snowbridge-pallet-inbound-queue + bump: none + - name: snowbridge-router-primitives + bump: none + - name: snowbridge-pallet-outbound-queue + bump: none + - name: snowbridge-pallet-system + bump: none + - name: bp-asset-hub-rococo + bump: none + - name: bp-asset-hub-westend + bump: none + - name: bp-polkadot-bulletin + bump: none + - name: asset-hub-rococo-runtime + bump: none + - name: asset-hub-westend-runtime + bump: none + - name: bridge-hub-rococo-runtime + bump: none + - name: bridge-hub-westend-runtime + bump: none + - name: collectives-westend-runtime + bump: none + - name: coretime-rococo-runtime + bump: none + - name: coretime-westend-runtime + bump: none + - name: people-rococo-runtime + bump: none + - name: people-westend-runtime + bump: none + - name: penpal-runtime + bump: none + - name: contracts-rococo-runtime + bump: none + - name: glutton-westend-runtime + bump: none + - name: rococo-parachain-runtime + bump: none + - name: xcm-simulator-example + bump: none \ No newline at end of file diff --git a/prdoc/pr_6690.prdoc b/prdoc/pr_6690.prdoc new file mode 100644 index 000000000000..0e4a2437ef96 --- /dev/null +++ b/prdoc/pr_6690.prdoc @@ -0,0 +1,17 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Fix Possible bug, Vote import failed after aggression is enabled + +doc: + - audience: Node Dev + description: | + Fix the appearance of Possible bug: Vote import failed after aggression is enabled, the log itself is + harmless because approval gets imported anyway and aggression is able to distribute it, nevertheless + is something that can be easily be fixed by picking the highest required routing possible. + +crates: + - name: polkadot-node-network-protocol + bump: minor + - name: polkadot-approval-distribution + bump: minor diff --git a/prdoc/pr_6695.prdoc b/prdoc/pr_6695.prdoc new file mode 100644 index 000000000000..7a950e8546cd --- /dev/null +++ b/prdoc/pr_6695.prdoc @@ -0,0 +1,8 @@ +title: '[pallet-revive] bugfix decoding 64bit args in the decoder' +doc: +- audience: Runtime Dev + description: The argument index of the next argument is dictated by the size of + the current one. 
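A sketch of the indexing rule pr_6695 describes, using a hypothetical helper (the real fix lives in the `pallet-revive-proc-macro` syscall decoder): the register index of the next argument must advance by the number of register slots the current argument occupies, not by a constant one.

```rust
fn register_indices(arg_sizes: &[usize], slot_size: usize) -> Vec<usize> {
    let mut next = 0;
    arg_sizes
        .iter()
        .map(|&size| {
            let idx = next;
            next += size.div_ceil(slot_size); // wide args consume extra slots
            idx
        })
        .collect()
}

fn main() {
    // With 32-bit slots, a u64 occupies two slots, so a following u32 must
    // be fetched from index 3, not 2: [u32, u64, u32] -> [0, 1, 3].
    assert_eq!(register_indices(&[4, 8, 4], 4), vec![0, 1, 3]);
}
```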
+crates: +- name: pallet-revive-proc-macro + bump: patch diff --git a/prdoc/pr_6696.prdoc b/prdoc/pr_6696.prdoc new file mode 100644 index 000000000000..c5c73f831886 --- /dev/null +++ b/prdoc/pr_6696.prdoc @@ -0,0 +1,15 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Make approval-distribution aggression a bit more robust and less spammy + +doc: + - audience: Node Dev + description: | + The problem with the current implementation of approval-distribution aggression is that it is too spammy + and can overload the nodes, so make it less spammy by moving back the moment we trigger L2 aggression + and enabling resend only for the latest unfinalized block. + +crates: + - name: polkadot-approval-distribution + bump: minor diff --git a/prdoc/pr_6703.prdoc b/prdoc/pr_6703.prdoc new file mode 100644 index 000000000000..2dd0962a3eea --- /dev/null +++ b/prdoc/pr_6703.prdoc @@ -0,0 +1,7 @@ +title: 'network/libp2p-backend: Suppress warning adding already reserved node as reserved' +doc: +- audience: Node Dev + description: Fixes https://github.com/paritytech/polkadot-sdk/issues/6598. +crates: +- name: sc-network + bump: patch diff --git a/prdoc/pr_6711.prdoc b/prdoc/pr_6711.prdoc new file mode 100644 index 000000000000..ec09035e1356 --- /dev/null +++ b/prdoc/pr_6711.prdoc @@ -0,0 +1,13 @@ +title: Expose DHT content providers API from `sc-network` +doc: +- audience: Node Dev + description: |- + Expose the Kademlia content providers API for use by `sc-network` client code: + 1. Extend the `NetworkDHTProvider` trait with functions to start/stop providing content and query the DHT for the list of content providers for a given key. + 2. Extend the `DhtEvent` enum with events reporting the found providers or query failures. + 3. Implement the above for libp2p & litep2p network backends. +crates: +- name: sc-network + bump: major +- name: sc-authority-discovery + bump: major diff --git a/prdoc/pr_6728.prdoc b/prdoc/pr_6728.prdoc new file mode 100644 index 000000000000..68f61190d947 --- /dev/null +++ b/prdoc/pr_6728.prdoc @@ -0,0 +1,12 @@ +title: '[pallet-revive] eth-rpc add missing tests' +doc: +- audience: Runtime Dev + description: |- + Add tests for #6608 + + fix https://github.com/paritytech/contract-issues/issues/12 +crates: +- name: pallet-revive-eth-rpc + bump: minor +- name: pallet-revive + bump: minor diff --git a/prdoc/pr_6729.prdoc b/prdoc/pr_6729.prdoc new file mode 100644 index 000000000000..9eaa67363c9a --- /dev/null +++ b/prdoc/pr_6729.prdoc @@ -0,0 +1,15 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Fix order of resending messages after restart + +doc: + - audience: Node Dev + description: | + At restart, when dealing with a coalesced approval, we might end up in a situation where we sent to + approval-distribution the approval before all assignments covering it; in that case, the approval + is ignored and never distributed, which will lead to no-shows.
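A minimal sketch of that ordering constraint, with hypothetical types standing in for the real subsystem messages: every assignment covering a coalesced approval is resent before the approval itself, so the approval is not dropped downstream.

```rust
struct Assignment(u32);
struct Approval(Vec<u32>); // candidate indices the approval covers

fn resend(assignments: Vec<Assignment>, approval: Approval, send: impl Fn(String)) {
    // First resend every assignment covering the approval...
    for Assignment(idx) in assignments {
        send(format!("assignment for candidate {idx}"));
    }
    // ...only then the approval, which is now guaranteed to be accepted.
    send(format!("approval covering candidates {:?}", approval.0));
}

fn main() {
    resend(vec![Assignment(0), Assignment(1)], Approval(vec![0, 1]), |m| println!("{m}"));
}
```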
+ +crates: + - name: polkadot-node-core-approval-voting + bump: minor diff --git a/prdoc/pr_6741.prdoc b/prdoc/pr_6741.prdoc new file mode 100644 index 000000000000..d4b795038bcd --- /dev/null +++ b/prdoc/pr_6741.prdoc @@ -0,0 +1,16 @@ +title: 'pallet-revive: Adjust error handling of sub calls' +doc: +- audience: Runtime Dev + description: |- + We were trapping the host context in case a sub call was exhausting the storage deposit limit set for this sub call. This prevented the caller from handling this error. In this PR we added a new error code that is returned when either the gas or the storage deposit limit is exhausted by the sub call. + + We also remove the no longer used `NotCallable` error. It is no longer used because this is no longer an error: it will just be a balance transfer. + + We also make `set_code_hash` infallible to be consistent with other host functions which just trap on any error condition. +crates: +- name: pallet-revive + bump: major +- name: pallet-revive-uapi + bump: major +- name: pallet-revive-fixtures + bump: major diff --git a/prdoc/pr_6742.prdoc b/prdoc/pr_6742.prdoc new file mode 100644 index 000000000000..92c3755a3c28 --- /dev/null +++ b/prdoc/pr_6742.prdoc @@ -0,0 +1,11 @@ +title: Update litep2p backend to v0.8.3 +doc: +- audience: Node Dev + description: |- + This release includes two fixes for small memory leaks on edge-cases in the notification and request-response protocols. + While at it, a log message from litep2p has been downgraded. + +crates: +- name: sc-network + bump: patch + diff --git a/prdoc/pr_6743.prdoc b/prdoc/pr_6743.prdoc new file mode 100644 index 000000000000..4c35ff46ca67 --- /dev/null +++ b/prdoc/pr_6743.prdoc @@ -0,0 +1,10 @@ +title: 'umbrella: Remove `pallet-revive-fixtures`' +doc: +- audience: Runtime Dev + description: |- + No need to have them in the umbrella crate; by having them in the umbrella crate they bleed into the normal build. +crates: +- name: pallet-revive-fixtures + bump: major +- name: polkadot-sdk + bump: major diff --git a/prdoc/pr_6759.prdoc b/prdoc/pr_6759.prdoc new file mode 100644 index 000000000000..3dff12d740d4 --- /dev/null +++ b/prdoc/pr_6759.prdoc @@ -0,0 +1,16 @@ +title: 'pallet-revive: Statically verify imports on code deployment' +doc: +- audience: Runtime Dev + description: |- + Previously, we failed at runtime if an unknown or unstable host function was called. This required us to keep track of when a host function was added and when a code was deployed. We used the `api_version` to track at which API version each code was deployed. This made sure that when a new host function was added, old code wouldn't have access to it. This is necessary as otherwise the behavior of a contract that made calls to a previously non-existent host function would change from "trap" to "do something". + + In this PR we remove the API version. Instead, we statically verify on upload that no non-existent host function is ever used in the code. This will allow us to add new host functions later without needing to keep track of when they were added. + + This simplifies the code and also gives immediate feedback if unknown host functions are used.
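A minimal sketch of such an upload-time check (the real verification inspects the PolkaVM blob's import table inside `pallet-revive`):

```rust
use std::collections::HashSet;

fn verify_imports<'a>(
    imports: impl IntoIterator<Item = &'a str>,
    known_host_fns: &HashSet<&str>,
) -> Result<(), String> {
    for sym in imports {
        if !known_host_fns.contains(sym) {
            // Immediate feedback at deploy time instead of a trap at runtime.
            return Err(format!("code imports unknown host function `{sym}`"));
        }
    }
    Ok(())
}

fn main() {
    let known: HashSet<&str> = ["caller", "value_transferred"].into();
    assert!(verify_imports(["caller"], &known).is_ok());
    assert!(verify_imports(["does_not_exist"], &known).is_err());
}
```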
+crates: +- name: pallet-revive-proc-macro + bump: major +- name: pallet-revive + bump: major +- name: pallet-revive-fixtures + bump: major diff --git a/prdoc/pr_6760.prdoc b/prdoc/pr_6760.prdoc new file mode 100644 index 000000000000..8224b72fb0a4 --- /dev/null +++ b/prdoc/pr_6760.prdoc @@ -0,0 +1,9 @@ +title: 'chainHead: Always report discarded items for storage operations' +doc: +- audience: [Node Dev, Node Operator] + description: |- + This PR ensures that substrate always reports discarded items as zero. + This is needed to align with the rpc-v2 spec. +crates: +- name: sc-rpc-spec-v2 + bump: patch diff --git a/prdoc/pr_6768.prdoc b/prdoc/pr_6768.prdoc new file mode 100644 index 000000000000..3e194078df26 --- /dev/null +++ b/prdoc/pr_6768.prdoc @@ -0,0 +1,14 @@ +title: '`basic-authorship`: debug level is now less spammy' +doc: +- audience: Node Dev + description: |- + The `debug` level in `sc-basic-authorship` is now less spammy. Previously it was outputting logs per individual transaction. This made it quite hard to follow the logs (and also generated unneeded traffic in Grafana). + + Now the debug level only shows some internal details, without spamming the output with per-transaction logs. Those were moved to the `trace` level. + + I also added the `EndProposingReason` to the summary INFO message. This allows us to know what the block limit was (which is very useful for debugging). +crates: +- name: sc-basic-authorship + bump: major +- name: sc-proposer-metrics + bump: major diff --git a/prdoc/pr_6781.prdoc b/prdoc/pr_6781.prdoc new file mode 100644 index 000000000000..8090be420341 --- /dev/null +++ b/prdoc/pr_6781.prdoc @@ -0,0 +1,28 @@ +title: Bridges - revert back congestion mechanism + +doc: +- audience: Runtime Dev + description: |- + With [permissionless lanes PR#4949](https://github.com/paritytech/polkadot-sdk/pull/4949), the congestion mechanism based on sending `Transact(report_bridge_status(is_congested))` from `pallet-xcm-bridge-hub` to `pallet-xcm-bridge-hub-router` was replaced with a congestion mechanism that relied on monitoring XCMP queues. However, this approach could cause issues, such as suspending the entire XCMP queue instead of isolating the affected bridge. This PR reverts back to using `report_bridge_status` as before. + +crates: +- name: pallet-xcm-bridge-hub-router + bump: patch +- name: pallet-xcm-bridge-hub + bump: patch +- name: bp-xcm-bridge-hub + bump: patch +- name: bp-asset-hub-rococo + bump: patch +- name: bp-asset-hub-westend + bump: patch +- name: asset-hub-rococo-runtime + bump: patch +- name: asset-hub-westend-runtime + bump: patch +- name: asset-test-utils + bump: patch +- name: bridge-hub-rococo-runtime + bump: patch +- name: bridge-hub-westend-runtime + bump: patch diff --git a/prdoc/pr_6792.prdoc b/prdoc/pr_6792.prdoc new file mode 100644 index 000000000000..80982a34b3e8 --- /dev/null +++ b/prdoc/pr_6792.prdoc @@ -0,0 +1,11 @@ +title: Add fallback_max_weight to snowbridge Transact +doc: +- audience: Runtime Dev + description: |- + We removed the `require_weight_at_most` field and later changed it to `fallback_max_weight`. + This was to have a fallback when sending a message to v4 chains, which happens in the small time window when chains are upgrading. + We originally put no fallback for a message in snowbridge's inbound queue but we should have one. + This PR adds it.
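A self-contained sketch of the fallback semantics described in pr_6643 and pr_6792; the structs below are stand-ins for illustration, not the `staging-xcm` types:

```rust
#[derive(Clone, Copy)]
struct Weight(u64);

struct TransactV5 {
    fallback_max_weight: Option<Weight>,
}

struct TransactV4 {
    require_weight_at_most: Weight,
}

fn downgrade(v5: TransactV5) -> Result<TransactV4, &'static str> {
    // Without a fallback there is nothing to put into the mandatory V4 field.
    let weight = v5.fallback_max_weight.ok_or("cannot convert Transact to V4")?;
    Ok(TransactV4 { require_weight_at_most: weight })
}

fn main() {
    assert!(downgrade(TransactV5 { fallback_max_weight: Some(Weight(1_000)) }).is_ok());
    assert!(downgrade(TransactV5 { fallback_max_weight: None }).is_err());
}
```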
+crates: +- name: snowbridge-router-primitives + bump: patch diff --git a/prdoc/pr_6796.prdoc b/prdoc/pr_6796.prdoc new file mode 100644 index 000000000000..aeb305847bf8 --- /dev/null +++ b/prdoc/pr_6796.prdoc @@ -0,0 +1,9 @@ +title: 'pallet-revive: Remove unused dependencies' +doc: +- audience: Runtime Dev + description: The dependency on `pallet_balances` doesn't seem to be necessary. At + least everything compiles for me without it. Removed this dependency and a few + others that seem to be leftovers. +crates: +- name: pallet-revive + bump: major diff --git a/prdoc/pr_6814.prdoc b/prdoc/pr_6814.prdoc new file mode 100644 index 000000000000..4edbf2f8ed28 --- /dev/null +++ b/prdoc/pr_6814.prdoc @@ -0,0 +1,32 @@ +title: Add aliasers to westend chains +doc: +- audience: Runtime Dev + description: |- + `InitiateTransfer`, the new instruction introduced in XCMv5, allows preserving the origin after a cross-chain transfer via the usage of the `AliasOrigin` instruction. The receiving chain needs to be configured to allow this instruction to have its intended effect and not just throw an error. + + In this PR, I add the alias rules specified in the [RFC for origin preservation](https://github.com/polkadot-fellows/RFCs/blob/main/text/0122-alias-origin-on-asset-transfers.md) to westend chains so we can test these scenarios in the testnet. + + The new scenarios include: + - Sending a cross-chain transfer from one system chain to another and doing a Transact on the same message (1 hop) + - Sending a reserve asset transfer from one chain to another going through asset hub and doing Transact on the same message (2 hops) + + The updated chains are: + - Relay: added `AliasChildLocation` + - Collectives: added `AliasChildLocation` and `AliasOriginRootUsingFilter` + - People: added `AliasChildLocation` and `AliasOriginRootUsingFilter` + - Coretime: added `AliasChildLocation` and `AliasOriginRootUsingFilter` + + AssetHub already has `AliasChildLocation` and doesn't need the other config item. + BridgeHub is not intended to be used by end users so I didn't add any config item. + Only added `AliasChildOrigin` to the relay since we intend for it to be used less. +crates: +- name: westend-runtime + bump: patch +- name: collectives-westend-runtime + bump: patch +- name: people-westend-runtime + bump: patch +- name: coretime-westend-runtime + bump: patch +- name: pallet-xcm-benchmarks + bump: patch diff --git a/prdoc/pr_6832.prdoc b/prdoc/pr_6832.prdoc new file mode 100644 index 000000000000..bd0abbfba853 --- /dev/null +++ b/prdoc/pr_6832.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Remove collation-generation subsystem from validator nodes" + +doc: + - audience: Node Dev + description: | + Collation-generation is only needed for collators, and therefore not needed for validators. + +crates: + - name: polkadot-service + bump: patch \ No newline at end of file diff --git a/prdoc/pr_6835.prdoc b/prdoc/pr_6835.prdoc new file mode 100644 index 000000000000..73d1a81e761c --- /dev/null +++ b/prdoc/pr_6835.prdoc @@ -0,0 +1,12 @@ +title: '[pallet-revive] implement the call data load API' +doc: +- audience: Runtime Dev + description: |- + This PR implements the call data load API akin to [how it works on ethereum](https://www.evm.codes/?fork=cancun#35).
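A hypothetical sketch of the semantics, modeled on EVM `CALLDATALOAD` (32 zero-padded bytes at a byte offset); the actual `pallet-revive-uapi` host-function signature may differ:

```rust
fn call_data_load(call_data: &[u8], offset: usize) -> [u8; 32] {
    let mut word = [0u8; 32]; // zero-padded past the end, as on the EVM
    for (i, byte) in word.iter_mut().enumerate() {
        if let Some(&b) = call_data.get(offset + i) {
            *byte = b;
        }
    }
    word
}

fn main() {
    let data = [0xAAu8; 36];
    assert_eq!(call_data_load(&data, 0)[0], 0xAA);
    assert_eq!(call_data_load(&data, 35)[1], 0); // out of range reads as zero
}
```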
+crates: +- name: pallet-revive-fixtures + bump: minor +- name: pallet-revive + bump: minor +- name: pallet-revive-uapi + bump: minor diff --git a/prdoc/pr_6844.prdoc b/prdoc/pr_6844.prdoc new file mode 100644 index 000000000000..32901bf04df9 --- /dev/null +++ b/prdoc/pr_6844.prdoc @@ -0,0 +1,8 @@ +title: 'pallet-revive: disable host functions unused in solidity PolkaVM compiler' +doc: +- audience: Runtime Dev + description: Disables host functions in contracts that are not enabled + in the Solidity PolkaVM compiler, to reduce the surface of possible attack vectors. +crates: +- name: pallet-revive + bump: major diff --git a/prdoc/pr_6857.prdoc b/prdoc/pr_6857.prdoc new file mode 100644 index 000000000000..3930f5910487 --- /dev/null +++ b/prdoc/pr_6857.prdoc @@ -0,0 +1,14 @@ +title: '[pallet-revive] implement the call data size API' +doc: +- audience: Runtime Dev + description: |- + This PR adds an API method to query the contract call data input size. + + Part of #6770 +crates: +- name: pallet-revive-fixtures + bump: minor +- name: pallet-revive + bump: minor +- name: pallet-revive-uapi + bump: minor diff --git a/prdoc/pr_6860.prdoc b/prdoc/pr_6860.prdoc new file mode 100644 index 000000000000..76b460ce52dd --- /dev/null +++ b/prdoc/pr_6860.prdoc @@ -0,0 +1,10 @@ +title: Update litep2p network backend to v0.8.4 + +doc: + - audience: [ Node Dev, Node Operator ] + description: | + This PR updates the Litep2p network backend to version 0.8.4 + +crates: + - name: sc-network + bump: patch diff --git a/prdoc/pr_6863.prdoc b/prdoc/pr_6863.prdoc new file mode 100644 index 000000000000..0dd416e5e438 --- /dev/null +++ b/prdoc/pr_6863.prdoc @@ -0,0 +1,9 @@ +title: Update merkleized-metadata to 0.2.0 +doc: +- audience: Node Dev + description: |- + 0.1.2 was yanked as it was breaking semver. +crates: + - name: substrate-wasm-builder + bump: patch + validate: false diff --git a/prdoc/pr_6864.prdoc b/prdoc/pr_6864.prdoc new file mode 100644 index 000000000000..6d6c84e22da4 --- /dev/null +++ b/prdoc/pr_6864.prdoc @@ -0,0 +1,18 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Fix approval-voting canonicalize off by one + +doc: + - audience: Node Dev + description: | + The approval-voting canonicalize was off by one, which led to blocks being + cleaned up only once every two blocks. Normally, this is not an issue, but on restart + we might end up sending NewBlocks to approval-distribution with finalized blocks. + This would be problematic in the case where finalization was already lagging before + restart, so after restart approval-distribution will trigger aggression on the wrong, + already finalized block. + +crates: + - name: polkadot-node-core-approval-voting + bump: minor diff --git a/prdoc/pr_6865.prdoc b/prdoc/pr_6865.prdoc new file mode 100644 index 000000000000..c0581f2af24f --- /dev/null +++ b/prdoc/pr_6865.prdoc @@ -0,0 +1,9 @@ +title: Rename PanicInfo to PanicHookInfo +doc: +- audience: Node Dev + description: Starting with Rust 1.82, `PanicInfo` is deprecated and will throw warnings + when used. The new type has been available since Rust 1.81 and should be available on + our CI.
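For reference, a minimal use of the renamed type, using only the standard library (stable since Rust 1.81):

```rust
// The hook parameter is now std::panic::PanicHookInfo, replacing the
// deprecated std::panic::PanicInfo for panic hooks.
fn main() {
    std::panic::set_hook(Box::new(|info: &std::panic::PanicHookInfo<'_>| {
        eprintln!("caught panic: {info}");
    }));
    let _ = std::panic::catch_unwind(|| panic!("boom"));
}
```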
+crates: +- name: sp-panic-handler + bump: patch diff --git a/prdoc/pr_6866.prdoc b/prdoc/pr_6866.prdoc new file mode 100644 index 000000000000..fac40dc103d7 --- /dev/null +++ b/prdoc/pr_6866.prdoc @@ -0,0 +1,13 @@ +title: Refactor `pallet-revive-uapi` pallet +doc: +- audience: Runtime Dev + description: Puts unstable host functions in `uapi` under + `unstable-api` feature while moving those functions after + stable functions. +crates: +- name: pallet-revive + bump: patch +- name: pallet-revive-fixtures + bump: patch +- name: pallet-revive-uapi + bump: major diff --git a/prdoc/pr_6885.prdoc b/prdoc/pr_6885.prdoc new file mode 100644 index 000000000000..986d76962289 --- /dev/null +++ b/prdoc/pr_6885.prdoc @@ -0,0 +1,11 @@ +title: 'Omni-node: Detect pending code in storage and send go ahead signal in dev-mode.' +doc: +- audience: Runtime Dev + description: |- + When using the polkadot-omni-node with manual seal (`--dev-block-time`), it is now possible to perform runtime + upgrades. The node will detect the pending validation code and send a go-ahead signal to the parachain. +crates: +- name: cumulus-client-parachain-inherent + bump: major +- name: polkadot-omni-node-lib + bump: patch diff --git a/prdoc/pr_6889.prdoc b/prdoc/pr_6889.prdoc new file mode 100644 index 000000000000..01edd49b685a --- /dev/null +++ b/prdoc/pr_6889.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Remove polkadot-omni-node-lib unused dependency + +doc: + - audience: Node Dev + description: + Removed an unused dependency for `polkadot-omni-node-lib`. + +crates: + - name: polkadot-omni-node-lib + bump: patch diff --git a/scripts/generate-umbrella.py b/scripts/generate-umbrella.py index 8326909c3449..ae3873180553 100644 --- a/scripts/generate-umbrella.py +++ b/scripts/generate-umbrella.py @@ -120,6 +120,8 @@ def main(path, version): "edition": { "workspace": True }, "authors": { "workspace": True }, "description": "Polkadot SDK umbrella crate.", + "homepage": { "workspace": True }, + "repository": { "workspace": True }, "license": "Apache-2.0", "metadata": { "docs": { "rs": { "features": ["runtime-full", "node"], diff --git a/substrate/.config/nextest.toml b/substrate/.config/nextest.toml deleted file mode 100644 index eb0ed09cad92..000000000000 --- a/substrate/.config/nextest.toml +++ /dev/null @@ -1,124 +0,0 @@ -# This is the default config used by nextest. It is embedded in the binary at -# build time. It may be used as a template for .config/nextest.toml. - -[store] -# The directory under the workspace root at which nextest-related files are -# written. Profile-specific storage is currently written to dir/. -dir = "target/nextest" - -# This section defines the default nextest profile. Custom profiles are layered -# on top of the default profile. -[profile.default] -# "retries" defines the number of times a test should be retried. If set to a -# non-zero value, tests that succeed on a subsequent attempt will be marked as -# non-flaky. Can be overridden through the `--retries` option. -# Examples -# * retries = 3 -# * retries = { backoff = "fixed", count = 2, delay = "1s" } -# * retries = { backoff = "exponential", count = 10, delay = "1s", jitter = true, max-delay = "10s" } -retries = 5 - -# The number of threads to run tests with. Supported values are either an integer or -# the string "num-cpus". Can be overridden through the `--test-threads` option. 
-test-threads = "num-cpus"
-
-# The number of threads required for each test. This is generally used in overrides to
-# mark certain tests as heavier than others. However, it can also be set as a global parameter.
-threads-required = 1
-
-# Show these test statuses in the output.
-#
-# The possible values this can take are:
-# * none: no output
-# * fail: show failed (including exec-failed) tests
-# * retry: show flaky and retried tests
-# * slow: show slow tests
-# * pass: show passed tests
-# * skip: show skipped tests (most useful for CI)
-# * all: all of the above
-#
-# Each value includes all the values above it; for example, "slow" includes
-# failed and retried tests.
-#
-# Can be overridden through the `--status-level` flag.
-status-level = "pass"
-
-# Similar to status-level, show these test statuses at the end of the run.
-final-status-level = "flaky"
-
-# "failure-output" defines when standard output and standard error for failing tests are produced.
-# Accepted values are
-# * "immediate": output failures as soon as they happen
-# * "final": output failures at the end of the test run
-# * "immediate-final": output failures as soon as they happen and at the end of
-#   the test run; combination of "immediate" and "final"
-# * "never": don't output failures at all
-#
-# For large test suites and CI it is generally useful to use "immediate-final".
-#
-# Can be overridden through the `--failure-output` option.
-failure-output = "immediate"
-
-# "success-output" controls production of standard output and standard error on success. This should
-# generally be set to "never".
-success-output = "never"
-
-# Cancel the test run on the first failure. For CI runs, consider setting this
-# to false.
-fail-fast = true
-
-# Treat a test that takes longer than the configured 'period' as slow, and print a message.
-# See <https://nexte.st/book/slow-tests> for more information.
-#
-# Optional: specify the parameter 'terminate-after' with a non-zero integer,
-# which will cause slow tests to be terminated after the specified number of
-# periods have passed.
-# Example: slow-timeout = { period = "60s", terminate-after = 2 }
-slow-timeout = { period = "60s" }
-
-# Treat a test as leaky if after the process is shut down, standard output and standard error
-# aren't closed within this duration.
-#
-# This usually happens in case of a test that creates a child process and lets it inherit those
-# handles, but doesn't clean the child process up (especially when it fails).
-#
-# See <https://nexte.st/book/leaky-tests> for more information.
-leak-timeout = "100ms"
-
-[profile.default.junit]
-# Output a JUnit report into the given file inside 'store.dir/<profile-name>'.
-# If unspecified, JUnit is not written out.
-
-path = "junit.xml"
-
-# The name of the top-level "report" element in JUnit report. If aggregating
-# reports across different test runs, it may be useful to provide separate names
-# for each report.
-report-name = "substrate"
-
-# Whether standard output and standard error for passing tests should be stored in the JUnit report.
-# Output is stored in the <system-out> and <system-err> elements of the <testcase> element.
-store-success-output = false
-
-# Whether standard output and standard error for failing tests should be stored in the JUnit report.
-# Output is stored in the <system-out> and <system-err> elements of the <testcase> element.
-#
-# Note that if a description can be extracted from the output, it is always stored in the
-# <description> element.
-store-failure-output = true
-
-# This profile is activated if MIRI_SYSROOT is set.
-[profile.default-miri]
-# Miri tests take up a lot of memory, so only run 1 test at a time by default.
-test-threads = 1 - -# Mutual exclusion of tests with `cargo build` invocation as a lock to avoid multiple -# simultaneous invocations clobbering each other. -[test-groups] -serial-integration = { max-threads = 1 } - -# Running UI tests sequentially -# More info can be found here: https://github.com/paritytech/ci_cd/issues/754 -[[profile.default.overrides]] -filter = 'test(/(^ui$|_ui|ui_)/)' -test-group = 'serial-integration' diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs index 5cde85ae5790..5f6806c235f6 100644 --- a/substrate/bin/node/cli/src/service.rs +++ b/substrate/bin/node/cli/src/service.rs @@ -871,7 +871,7 @@ mod tests { use sp_consensus::{BlockOrigin, Environment, Proposer}; use sp_core::crypto::Pair; use sp_inherents::InherentDataProvider; - use sp_keyring::AccountKeyring; + use sp_keyring::Sr25519Keyring; use sp_keystore::KeystorePtr; use sp_runtime::{ generic::{self, Digest, Era, SignedPayload}, @@ -906,8 +906,8 @@ mod tests { let mut slot = 1u64; // For the extrinsics factory - let bob = Arc::new(AccountKeyring::Bob.pair()); - let charlie = Arc::new(AccountKeyring::Charlie.pair()); + let bob = Arc::new(Sr25519Keyring::Bob.pair()); + let charlie = Arc::new(Sr25519Keyring::Charlie.pair()); let mut index = 0; sc_service_test::sync( diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index e68e04840776..faffcd23fbcf 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -392,6 +392,7 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = ConstU32<100>; type WeightInfo = pallet_multisig::weights::SubstrateWeight; + type BlockNumberProvider = frame_system::Pallet; } parameter_types! { @@ -479,6 +480,7 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; type AnnouncementDepositFactor = AnnouncementDepositFactor; + type BlockNumberProvider = frame_system::Pallet; } parameter_types! { @@ -2048,6 +2050,7 @@ impl pallet_nfts::Config for Runtime { type Helper = (); type CreateOrigin = AsEnsureOriginWithArg>; type Locker = (); + type BlockNumberProvider = frame_system::Pallet; } impl pallet_transaction_storage::Config for Runtime { @@ -3215,18 +3218,9 @@ impl_runtime_apis! { System::account_nonce(account) } - fn eth_transact( - from: H160, - dest: Option, - value: U256, - input: Vec, - gas_limit: Option, - storage_deposit_limit: Option, - ) -> pallet_revive::EthContractResult + fn eth_transact(tx: pallet_revive::evm::GenericTransaction) -> Result, pallet_revive::EthTransactError> { - use pallet_revive::AddressMapper; let blockweights: BlockWeights = ::BlockWeights::get(); - let origin = ::AddressMapper::to_account_id(&from); let encoded_size = |pallet_call| { let call = RuntimeCall::Revive(pallet_call); @@ -3235,15 +3229,9 @@ impl_runtime_apis! { }; Revive::bare_eth_transact( - origin, - dest, - value, - input, - gas_limit.unwrap_or(blockweights.max_block), - storage_deposit_limit.unwrap_or(u128::MAX), + tx, + blockweights.max_block, encoded_size, - pallet_revive::DebugInfo::UnsafeDebug, - pallet_revive::CollectEvents::UnsafeCollect, ) } @@ -3260,7 +3248,7 @@ impl_runtime_apis! 
{ dest, value, gas_limit.unwrap_or(RuntimeBlockWeights::get().max_block), - storage_deposit_limit.unwrap_or(u128::MAX), + pallet_revive::DepositLimit::Balance(storage_deposit_limit.unwrap_or(u128::MAX)), input_data, pallet_revive::DebugInfo::UnsafeDebug, pallet_revive::CollectEvents::UnsafeCollect, @@ -3281,7 +3269,7 @@ impl_runtime_apis! { RuntimeOrigin::signed(origin), value, gas_limit.unwrap_or(RuntimeBlockWeights::get().max_block), - storage_deposit_limit.unwrap_or(u128::MAX), + pallet_revive::DepositLimit::Balance(storage_deposit_limit.unwrap_or(u128::MAX)), code, data, salt, diff --git a/substrate/bin/node/testing/Cargo.toml b/substrate/bin/node/testing/Cargo.toml index 16112386ad7c..1972c03a368b 100644 --- a/substrate/bin/node/testing/Cargo.toml +++ b/substrate/bin/node/testing/Cargo.toml @@ -37,7 +37,7 @@ sc-client-api = { workspace = true, default-features = true } sc-client-db = { features = ["rocksdb"], workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } sc-executor = { workspace = true, default-features = true } -sc-service = { features = ["rocksdb", "test-helpers"], workspace = true, default-features = true } +sc-service = { features = ["rocksdb"], workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-block-builder = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } diff --git a/substrate/client/authority-discovery/src/worker.rs b/substrate/client/authority-discovery/src/worker.rs index ba82910efcdf..6630b7157d96 100644 --- a/substrate/client/authority-discovery/src/worker.rs +++ b/substrate/client/authority-discovery/src/worker.rs @@ -677,6 +677,9 @@ where metrics.dht_event_received.with_label_values(&["put_record_req"]).inc(); } }, + DhtEvent::StartProvidingFailed(..) => {}, + DhtEvent::ProvidersFound(..) => {}, + DhtEvent::ProvidersNotFound(..) 
=> {}, } } diff --git a/substrate/client/authority-discovery/src/worker/tests.rs b/substrate/client/authority-discovery/src/worker/tests.rs index 6c3a3b56b1cb..c14771585655 100644 --- a/substrate/client/authority-discovery/src/worker/tests.rs +++ b/substrate/client/authority-discovery/src/worker/tests.rs @@ -231,6 +231,18 @@ impl NetworkDHTProvider for TestNetwork { .unbounded_send(TestNetworkEvent::StoreRecordCalled) .unwrap(); } + + fn start_providing(&self, _: KademliaKey) { + unimplemented!() + } + + fn stop_providing(&self, _: KademliaKey) { + unimplemented!() + } + + fn get_providers(&self, _: KademliaKey) { + unimplemented!() + } } impl NetworkStateInfo for TestNetwork { diff --git a/substrate/client/basic-authorship/src/basic_authorship.rs b/substrate/client/basic-authorship/src/basic_authorship.rs index 79e6fddae99f..2096af1c25bb 100644 --- a/substrate/client/basic-authorship/src/basic_authorship.rs +++ b/substrate/client/basic-authorship/src/basic_authorship.rs @@ -483,7 +483,7 @@ where match sc_block_builder::BlockBuilder::push(block_builder, pending_tx_data) { Ok(()) => { transaction_pushed = true; - debug!(target: LOG_TARGET, "[{:?}] Pushed to the block.", pending_tx_hash); + trace!(target: LOG_TARGET, "[{:?}] Pushed to the block.", pending_tx_hash); }, Err(ApplyExtrinsicFailed(Validity(e))) if e.exhausted_resources() => { pending_iterator.report_invalid(&pending_tx); @@ -565,20 +565,22 @@ where if log::log_enabled!(log::Level::Info) { info!( - "🎁 Prepared block for proposing at {} ({} ms) [hash: {:?}; parent_hash: {}; extrinsics_count: {}", + "🎁 Prepared block for proposing at {} ({} ms) hash: {:?}; parent_hash: {}; end: {:?}; extrinsics_count: {}", block.header().number(), block_took.as_millis(), ::Hash::from(block.header().hash()), block.header().parent_hash(), + end_reason, extrinsics.len() ) - } else if log::log_enabled!(log::Level::Debug) { - debug!( - "🎁 Prepared block for proposing at {} ({} ms) [hash: {:?}; parent_hash: {}; {extrinsics_summary}", + } else if log::log_enabled!(log::Level::Trace) { + trace!( + "🎁 Prepared block for proposing at {} ({} ms) hash: {:?}; parent_hash: {}; end: {:?}; {extrinsics_summary}", block.header().number(), block_took.as_millis(), ::Hash::from(block.header().hash()), block.header().parent_hash(), + end_reason ); } @@ -908,8 +910,8 @@ mod tests { let extrinsics_num = 5; let extrinsics = std::iter::once( Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Bob.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Bob.into(), amount: 100, nonce: 0, } @@ -1014,7 +1016,7 @@ mod tests { }; let huge = |who| { ExtrinsicBuilder::new_fill_block(Perbill::from_parts(HUGE)) - .signer(AccountKeyring::numeric(who)) + .signer(Sr25519Keyring::numeric(who)) .build() }; @@ -1080,13 +1082,13 @@ mod tests { let tiny = |who| { ExtrinsicBuilder::new_fill_block(Perbill::from_parts(TINY)) - .signer(AccountKeyring::numeric(who)) + .signer(Sr25519Keyring::numeric(who)) .nonce(1) .build() }; let huge = |who| { ExtrinsicBuilder::new_fill_block(Perbill::from_parts(HUGE)) - .signer(AccountKeyring::numeric(who)) + .signer(Sr25519Keyring::numeric(who)) .build() }; diff --git a/substrate/client/basic-authorship/src/lib.rs b/substrate/client/basic-authorship/src/lib.rs index adea7a3571dd..13c75fd08c3c 100644 --- a/substrate/client/basic-authorship/src/lib.rs +++ b/substrate/client/basic-authorship/src/lib.rs @@ -26,7 +26,7 @@ //! # use sp_runtime::generic::BlockId; //! # use std::{sync::Arc, time::Duration}; //! 
# use substrate_test_runtime_client::{ -//! # runtime::Transfer, AccountKeyring, +//! # runtime::Transfer, Sr25519Keyring, //! # DefaultTestClientBuilderExt, TestClientBuilderExt, //! # }; //! # use sc_transaction_pool::{BasicPool, FullChainApi}; diff --git a/substrate/client/chain-spec/src/chain_spec.rs b/substrate/client/chain-spec/src/chain_spec.rs index aa3c1ba3e6f1..fa161f1202ab 100644 --- a/substrate/client/chain-spec/src/chain_spec.rs +++ b/substrate/client/chain-spec/src/chain_spec.rs @@ -782,7 +782,7 @@ mod tests { use serde_json::{from_str, json, Value}; use sp_application_crypto::Ss58Codec; use sp_core::storage::well_known_keys; - use sp_keyring::AccountKeyring; + use sp_keyring::Sr25519Keyring; type TestSpec = ChainSpec; @@ -924,8 +924,8 @@ mod tests { }, "substrateTest": { "authorities": [ - AccountKeyring::Ferdie.public().to_ss58check(), - AccountKeyring::Alice.public().to_ss58check() + Sr25519Keyring::Ferdie.public().to_ss58check(), + Sr25519Keyring::Alice.public().to_ss58check() ], } })) @@ -980,8 +980,8 @@ mod tests { }, "substrateTest": { "authorities": [ - AccountKeyring::Ferdie.public().to_ss58check(), - AccountKeyring::Alice.public().to_ss58check() + Sr25519Keyring::Ferdie.public().to_ss58check(), + Sr25519Keyring::Alice.public().to_ss58check() ], } })) @@ -1083,8 +1083,8 @@ mod tests { "invalid_pallet": {}, "substrateTest": { "authorities": [ - AccountKeyring::Ferdie.public().to_ss58check(), - AccountKeyring::Alice.public().to_ss58check() + Sr25519Keyring::Ferdie.public().to_ss58check(), + Sr25519Keyring::Alice.public().to_ss58check() ], } })) diff --git a/substrate/client/cli/src/params/import_params.rs b/substrate/client/cli/src/params/import_params.rs index add7cb4f8505..e4b8b9644feb 100644 --- a/substrate/client/cli/src/params/import_params.rs +++ b/substrate/client/cli/src/params/import_params.rs @@ -78,21 +78,13 @@ pub struct ImportParams { /// Specify the state cache size. /// /// Providing `0` will disable the cache. - #[arg(long, value_name = "Bytes", default_value_t = 67108864)] + #[arg(long, value_name = "Bytes", default_value_t = 1024 * 1024 * 1024)] pub trie_cache_size: usize, - - /// DEPRECATED: switch to `--trie-cache-size`. - #[arg(long)] - state_cache_size: Option, } impl ImportParams { /// Specify the trie cache maximum size. pub fn trie_cache_maximum_size(&self) -> Option { - if self.state_cache_size.is_some() { - eprintln!("`--state-cache-size` was deprecated. Please switch to `--trie-cache-size`."); - } - if self.trie_cache_size == 0 { None } else { diff --git a/substrate/client/cli/src/params/shared_params.rs b/substrate/client/cli/src/params/shared_params.rs index 465372fba17d..e0c52deb44ca 100644 --- a/substrate/client/cli/src/params/shared_params.rs +++ b/substrate/client/cli/src/params/shared_params.rs @@ -33,10 +33,12 @@ pub struct SharedParams { /// Specify the development chain. /// - /// This flag sets `--chain=dev`, `--force-authoring`, `--rpc-cors=all`, - /// `--alice`, and `--tmp` flags, unless explicitly overridden. - /// It also disables local peer discovery (see --no-mdns and --discover-local) - #[arg(long, conflicts_with_all = &["chain"])] + /// This flag sets `--chain=dev`, `--force-authoring`, `--rpc-cors=all`, `--alice`, and `--tmp` + /// flags, unless explicitly overridden. It also disables local peer discovery (see `--no-mdns` + /// and `--discover-local`). With this flag some nodes might start with manual seal, producing + /// blocks at certain events (e.g. 
`polkadot-omni-node`, which produces blocks at certain + /// intervals dictated by `--dev-block-time`). + #[arg(long)] pub dev: bool, /// Specify custom base path. @@ -109,12 +111,8 @@ impl SharedParams { pub fn chain_id(&self, is_dev: bool) -> String { match self.chain { Some(ref chain) => chain.clone(), - None => - if is_dev { - "dev".into() - } else { - "".into() - }, + None if is_dev => "dev".into(), + _ => "".into(), } } diff --git a/substrate/client/consensus/manual-seal/src/lib.rs b/substrate/client/consensus/manual-seal/src/lib.rs index 39f8f8609d8d..af9bcc8d56d6 100644 --- a/substrate/client/consensus/manual-seal/src/lib.rs +++ b/substrate/client/consensus/manual-seal/src/lib.rs @@ -353,7 +353,7 @@ mod tests { use sp_inherents::InherentData; use sp_runtime::generic::{Digest, DigestItem}; use substrate_test_runtime_client::{ - AccountKeyring::*, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, + DefaultTestClientBuilderExt, Sr25519Keyring::*, TestClientBuilder, TestClientBuilderExt, }; use substrate_test_runtime_transaction_pool::{uxt, TestApi}; diff --git a/substrate/client/db/src/lib.rs b/substrate/client/db/src/lib.rs index cec981c05602..092101945107 100644 --- a/substrate/client/db/src/lib.rs +++ b/substrate/client/db/src/lib.rs @@ -1486,6 +1486,7 @@ impl Backend { .map(|(n, _)| n) .unwrap_or(Zero::zero()); let existing_header = number <= highest_leaf && self.blockchain.header(hash)?.is_some(); + let existing_body = pending_block.body.is_some(); // blocks are keyed by number + hash. let lookup_key = utils::number_and_hash_to_lookup_key(number, hash)?; @@ -1677,6 +1678,23 @@ impl Backend { children, ); } + } + + let should_check_block_gap = !existing_header || !existing_body; + + if should_check_block_gap { + let insert_new_gap = + |transaction: &mut Transaction, + new_gap: BlockGap>, + block_gap: &mut Option>>| { + transaction.set(columns::META, meta_keys::BLOCK_GAP, &new_gap.encode()); + transaction.set( + columns::META, + meta_keys::BLOCK_GAP_VERSION, + &BLOCK_GAP_CURRENT_VERSION.encode(), + ); + block_gap.replace(new_gap); + }; if let Some(mut gap) = block_gap { match gap.gap_type { @@ -1695,43 +1713,65 @@ impl Backend { block_gap = None; debug!(target: "db", "Removed block gap."); } else { - block_gap = Some(gap); + insert_new_gap(&mut transaction, gap, &mut block_gap); debug!(target: "db", "Update block gap. {block_gap:?}"); - transaction.set( - columns::META, - meta_keys::BLOCK_GAP, - &gap.encode(), - ); - transaction.set( - columns::META, - meta_keys::BLOCK_GAP_VERSION, - &BLOCK_GAP_CURRENT_VERSION.encode(), - ); } block_gap_updated = true; }, BlockGapType::MissingBody => { - unreachable!("Unsupported block gap. TODO: https://github.com/paritytech/polkadot-sdk/issues/5406") + // Gap increased when syncing the header chain during fast sync. + if number == gap.end + One::one() && !existing_body { + gap.end += One::one(); + utils::insert_number_to_key_mapping( + &mut transaction, + columns::KEY_LOOKUP, + number, + hash, + )?; + insert_new_gap(&mut transaction, gap, &mut block_gap); + debug!(target: "db", "Update block gap. {block_gap:?}"); + block_gap_updated = true; + // Gap decreased when downloading the full blocks. 
+ } else if number == gap.start && existing_body { + gap.start += One::one(); + if gap.start > gap.end { + transaction.remove(columns::META, meta_keys::BLOCK_GAP); + transaction.remove(columns::META, meta_keys::BLOCK_GAP_VERSION); + block_gap = None; + debug!(target: "db", "Removed block gap."); + } else { + insert_new_gap(&mut transaction, gap, &mut block_gap); + debug!(target: "db", "Update block gap. {block_gap:?}"); + } + block_gap_updated = true; + } }, } - } else if operation.create_gap && - number > best_num + One::one() && - self.blockchain.header(parent_hash)?.is_none() - { - let gap = BlockGap { - start: best_num + One::one(), - end: number - One::one(), - gap_type: BlockGapType::MissingHeaderAndBody, - }; - transaction.set(columns::META, meta_keys::BLOCK_GAP, &gap.encode()); - transaction.set( - columns::META, - meta_keys::BLOCK_GAP_VERSION, - &BLOCK_GAP_CURRENT_VERSION.encode(), - ); - block_gap = Some(gap); - block_gap_updated = true; - debug!(target: "db", "Detected block gap {block_gap:?}"); + } else if operation.create_gap { + if number > best_num + One::one() && + self.blockchain.header(parent_hash)?.is_none() + { + let gap = BlockGap { + start: best_num + One::one(), + end: number - One::one(), + gap_type: BlockGapType::MissingHeaderAndBody, + }; + insert_new_gap(&mut transaction, gap, &mut block_gap); + block_gap_updated = true; + debug!(target: "db", "Detected block gap (warp sync) {block_gap:?}"); + } else if number == best_num + One::one() && + self.blockchain.header(parent_hash)?.is_some() && + !existing_body + { + let gap = BlockGap { + start: number, + end: number, + gap_type: BlockGapType::MissingBody, + }; + insert_new_gap(&mut transaction, gap, &mut block_gap); + block_gap_updated = true; + debug!(target: "db", "Detected block gap (fast sync) {block_gap:?}"); + } } } diff --git a/substrate/client/executor/common/src/error.rs b/substrate/client/executor/common/src/error.rs index 9d489eaae420..a94c1d493134 100644 --- a/substrate/client/executor/common/src/error.rs +++ b/substrate/client/executor/common/src/error.rs @@ -150,8 +150,8 @@ pub enum WasmError { Other(String), } -impl From for WasmError { - fn from(error: polkavm::ProgramParseError) -> Self { +impl From for WasmError { + fn from(error: polkavm::program::ProgramParseError) -> Self { WasmError::Other(error.to_string()) } } diff --git a/substrate/client/executor/common/src/runtime_blob/runtime_blob.rs b/substrate/client/executor/common/src/runtime_blob/runtime_blob.rs index d689083b2f85..e3f4b4ad9774 100644 --- a/substrate/client/executor/common/src/runtime_blob/runtime_blob.rs +++ b/substrate/client/executor/common/src/runtime_blob/runtime_blob.rs @@ -17,6 +17,7 @@ // along with this program. If not, see . 
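Before continuing with the executor changes, an aside on the `sc-client-db` block-gap logic a few hunks up. Its `MissingBody` transitions can be summarized in a small, self-contained model (illustrative types only; this is not the actual `sc-client-db` API): a header-only import just past the gap's end widens the gap, a full-block import at its start narrows it, and the gap is removed once `start` overtakes `end`.

```rust
// A toy model of the `MissingBody` gap bookkeeping sketched above.
// All names here are hypothetical; the real logic lives in `sc-client-db`.
#[derive(Clone, Copy, Debug, PartialEq)]
enum GapType {
    MissingHeaderAndBody, // warp sync: neither header nor body present
    MissingBody,          // fast sync: headers present, bodies missing
}

#[derive(Clone, Copy, Debug, PartialEq)]
struct Gap {
    start: u64,
    end: u64,
    gap_type: GapType,
}

/// Update a gap when block `number` is imported; `None` means the gap closed.
fn on_import(gap: Gap, number: u64, has_body: bool) -> Option<Gap> {
    match gap.gap_type {
        // Header-only import right past the end: the gap grows.
        GapType::MissingBody if number == gap.end + 1 && !has_body =>
            Some(Gap { end: gap.end + 1, ..gap }),
        // Full-block import at the start: the gap shrinks, possibly closing.
        GapType::MissingBody if number == gap.start && has_body => {
            let start = gap.start + 1;
            (start <= gap.end).then(|| Gap { start, ..gap })
        },
        // Anything else leaves the gap untouched in this simplified model.
        _ => Some(gap),
    }
}

fn main() {
    let gap = Gap { start: 10, end: 10, gap_type: GapType::MissingBody };
    // Importing the full block at the start of a one-block gap closes it.
    assert_eq!(on_import(gap, 10, true), None);
}
```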
use crate::{error::WasmError, wasm_runtime::HeapAllocStrategy}; +use polkavm::ArcBytes; use wasm_instrument::parity_wasm::elements::{ deserialize_buffer, serialize, ExportEntry, External, Internal, MemorySection, MemoryType, Module, Section, @@ -29,7 +30,7 @@ pub struct RuntimeBlob(BlobKind); #[derive(Clone)] enum BlobKind { WebAssembly(Module), - PolkaVM(polkavm::ProgramBlob<'static>), + PolkaVM((polkavm::ProgramBlob, ArcBytes)), } impl RuntimeBlob { @@ -52,9 +53,9 @@ impl RuntimeBlob { pub fn new(raw_blob: &[u8]) -> Result { if raw_blob.starts_with(b"PVM\0") { if crate::is_polkavm_enabled() { - return Ok(Self(BlobKind::PolkaVM( - polkavm::ProgramBlob::parse(raw_blob)?.into_owned(), - ))); + let raw = ArcBytes::from(raw_blob); + let blob = polkavm::ProgramBlob::parse(raw.clone())?; + return Ok(Self(BlobKind::PolkaVM((blob, raw)))); } else { return Err(WasmError::Other("expected a WASM runtime blob, found a PolkaVM runtime blob; set the 'SUBSTRATE_ENABLE_POLKAVM' environment variable to enable the experimental PolkaVM-based executor".to_string())); } @@ -192,7 +193,7 @@ impl RuntimeBlob { match self.0 { BlobKind::WebAssembly(raw_module) => serialize(raw_module).expect("serializing into a vec should succeed; qed"), - BlobKind::PolkaVM(ref blob) => blob.as_bytes().to_vec(), + BlobKind::PolkaVM(ref blob) => blob.1.to_vec(), } } @@ -227,7 +228,7 @@ impl RuntimeBlob { pub fn as_polkavm_blob(&self) -> Option<&polkavm::ProgramBlob> { match self.0 { BlobKind::WebAssembly(..) => None, - BlobKind::PolkaVM(ref blob) => Some(blob), + BlobKind::PolkaVM((ref blob, _)) => Some(blob), } } } diff --git a/substrate/client/executor/polkavm/src/lib.rs b/substrate/client/executor/polkavm/src/lib.rs index 1bd72eb33d30..134f9ea3d8c4 100644 --- a/substrate/client/executor/polkavm/src/lib.rs +++ b/substrate/client/executor/polkavm/src/lib.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use polkavm::{Caller, Reg}; +use polkavm::{CallError, Caller, Reg}; use sc_executor_common::{ error::{Error, WasmError}, wasm_runtime::{AllocationStats, WasmInstance, WasmModule}, @@ -26,10 +26,10 @@ use sp_wasm_interface::{ }; #[repr(transparent)] -pub struct InstancePre(polkavm::InstancePre<()>); +pub struct InstancePre(polkavm::InstancePre<(), String>); #[repr(transparent)] -pub struct Instance(polkavm::Instance<()>); +pub struct Instance(polkavm::Instance<(), String>); impl WasmModule for InstancePre { fn new_instance(&self) -> Result, Error> { @@ -43,11 +43,13 @@ impl WasmInstance for Instance { name: &str, raw_data: &[u8], ) -> (Result, Error>, Option) { - let Some(method_index) = self.0.module().lookup_export(name) else { - return ( - Err(format!("cannot call into the runtime: export not found: '{name}'").into()), - None, - ); + let pc = match self.0.module().exports().find(|e| e.symbol() == name) { + Some(export) => export.program_counter(), + None => + return ( + Err(format!("cannot call into the runtime: export not found: '{name}'").into()), + None, + ), }; let Ok(raw_data_length) = u32::try_from(raw_data.len()) else { @@ -58,56 +60,60 @@ impl WasmInstance for Instance { }; // TODO: This will leak guest memory; find a better solution. - let mut state_args = polkavm::StateArgs::new(); - // Make sure the memory is cleared... - state_args.reset_memory(true); - // ...and allocate space for the input payload. - state_args.sbrk(raw_data_length); + // Make sure that the memory is cleared... 
+ if let Err(err) = self.0.reset_memory() { + return ( + Err(format!( + "call into the runtime method '{name}' failed: reset memory failed: {err}" + ) + .into()), + None, + ); + } - match self.0.update_state(state_args) { - Ok(()) => {}, - Err(polkavm::ExecutionError::Trap(trap)) => { - return (Err(format!("call into the runtime method '{name}' failed: failed to prepare the guest's memory: {trap}").into()), None); - }, - Err(polkavm::ExecutionError::Error(error)) => { - return (Err(format!("call into the runtime method '{name}' failed: failed to prepare the guest's memory: {error}").into()), None); - }, - Err(polkavm::ExecutionError::OutOfGas) => unreachable!("gas metering is never enabled"), + // ... and allocate space for the input payload. + if let Err(err) = self.0.sbrk(raw_data_length) { + return ( + Err(format!( + "call into the runtime method '{name}' failed: reset memory failed: {err}" + ) + .into()), + None, + ); } // Grab the address of where the guest's heap starts; that's where we've just allocated // the memory for the input payload. let data_pointer = self.0.module().memory_map().heap_base(); - if let Err(error) = self.0.write_memory(data_pointer, raw_data) { - return (Err(format!("call into the runtime method '{name}': failed to write the input payload into guest memory: {error}").into()), None); + if let Err(err) = self.0.write_memory(data_pointer, raw_data) { + return (Err(format!("call into the runtime method '{name}': failed to write the input payload into guest memory: {err}").into()), None); } - let mut state = (); - let mut call_args = polkavm::CallArgs::new(&mut state, method_index); - call_args.args_untyped(&[data_pointer, raw_data_length]); - - match self.0.call(Default::default(), call_args) { + match self.0.call_typed(&mut (), pc, (data_pointer, raw_data_length)) { Ok(()) => {}, - Err(polkavm::ExecutionError::Trap(trap)) => { + Err(CallError::Trap) => return ( - Err(format!("call into the runtime method '{name}' failed: {trap}").into()), + Err(format!("call into the runtime method '{name}' failed: trap").into()), None, - ); - }, - Err(polkavm::ExecutionError::Error(error)) => { + ), + Err(CallError::Error(err)) => return ( - Err(format!("call into the runtime method '{name}' failed: {error}").into()), + Err(format!("call into the runtime method '{name}' failed: {err}").into()), None, - ); - }, - Err(polkavm::ExecutionError::OutOfGas) => unreachable!("gas metering is never enabled"), - } + ), + Err(CallError::User(err)) => + return ( + Err(format!("call into the runtime method '{name}' failed: {err}").into()), + None, + ), + Err(CallError::NotEnoughGas) => unreachable!("gas metering is never enabled"), + }; - let result_pointer = self.0.get_reg(Reg::A0); - let result_length = self.0.get_reg(Reg::A1); - let output = match self.0.read_memory_into_vec(result_pointer, result_length) { + let result_pointer = self.0.reg(Reg::A0); + let result_length = self.0.reg(Reg::A1); + let output = match self.0.read_memory(result_pointer as u32, result_length as u32) { Ok(output) => output, Err(error) => { return (Err(format!("call into the runtime method '{name}' failed: failed to read the return payload: {error}").into()), None) @@ -127,20 +133,31 @@ impl<'r, 'a> FunctionContext for Context<'r, 'a> { dest: &mut [u8], ) -> sp_wasm_interface::Result<()> { self.0 - .read_memory_into_slice(u32::from(address), dest) + .instance + .read_memory_into(u32::from(address), dest) .map_err(|error| error.to_string()) .map(|_| ()) } fn write_memory(&mut self, address: Pointer, data: &[u8]) -> 
sp_wasm_interface::Result<()> { - self.0.write_memory(u32::from(address), data).map_err(|error| error.to_string()) + self.0 + .instance + .write_memory(u32::from(address), data) + .map_err(|error| error.to_string()) } fn allocate_memory(&mut self, size: WordSize) -> sp_wasm_interface::Result> { - let pointer = self.0.sbrk(0).expect("fetching the current heap pointer never fails"); + let pointer = match self.0.instance.sbrk(0) { + Ok(pointer) => pointer.expect("fetching the current heap pointer never fails"), + Err(err) => return Err(format!("sbrk failed: {err}")), + }; // TODO: This will leak guest memory; find a better solution. - self.0.sbrk(size).ok_or_else(|| String::from("allocation failed"))?; + match self.0.instance.sbrk(size) { + Ok(Some(_)) => (), + Ok(None) => return Err(String::from("allocation error")), + Err(err) => return Err(format!("sbrk failed: {err}")), + } Ok(Pointer::new(pointer)) } @@ -155,41 +172,46 @@ impl<'r, 'a> FunctionContext for Context<'r, 'a> { } } -fn call_host_function( - caller: &mut Caller<()>, - function: &dyn Function, -) -> Result<(), polkavm::Trap> { +fn call_host_function(caller: &mut Caller<()>, function: &dyn Function) -> Result<(), String> { let mut args = [Value::I64(0); Reg::ARG_REGS.len()]; let mut nth_reg = 0; for (nth_arg, kind) in function.signature().args.iter().enumerate() { match kind { ValueType::I32 => { - args[nth_arg] = Value::I32(caller.get_reg(Reg::ARG_REGS[nth_reg]) as i32); + args[nth_arg] = Value::I32(caller.instance.reg(Reg::ARG_REGS[nth_reg]) as i32); nth_reg += 1; }, ValueType::F32 => { - args[nth_arg] = Value::F32(caller.get_reg(Reg::ARG_REGS[nth_reg])); - nth_reg += 1; - }, - ValueType::I64 => { - let value_lo = caller.get_reg(Reg::ARG_REGS[nth_reg]); - nth_reg += 1; - - let value_hi = caller.get_reg(Reg::ARG_REGS[nth_reg]); - nth_reg += 1; - - args[nth_arg] = - Value::I64((u64::from(value_lo) | (u64::from(value_hi) << 32)) as i64); - }, - ValueType::F64 => { - let value_lo = caller.get_reg(Reg::ARG_REGS[nth_reg]); + args[nth_arg] = Value::F32(caller.instance.reg(Reg::ARG_REGS[nth_reg]) as u32); nth_reg += 1; - - let value_hi = caller.get_reg(Reg::ARG_REGS[nth_reg]); - nth_reg += 1; - - args[nth_arg] = Value::F64(u64::from(value_lo) | (u64::from(value_hi) << 32)); }, + ValueType::I64 => + if caller.instance.is_64_bit() { + args[nth_arg] = Value::I64(caller.instance.reg(Reg::ARG_REGS[nth_reg]) as i64); + nth_reg += 1; + } else { + let value_lo = caller.instance.reg(Reg::ARG_REGS[nth_reg]); + nth_reg += 1; + + let value_hi = caller.instance.reg(Reg::ARG_REGS[nth_reg]); + nth_reg += 1; + + args[nth_arg] = + Value::I64((u64::from(value_lo) | (u64::from(value_hi) << 32)) as i64); + }, + ValueType::F64 => + if caller.instance.is_64_bit() { + args[nth_arg] = Value::F64(caller.instance.reg(Reg::ARG_REGS[nth_reg])); + nth_reg += 1; + } else { + let value_lo = caller.instance.reg(Reg::ARG_REGS[nth_reg]); + nth_reg += 1; + + let value_hi = caller.instance.reg(Reg::ARG_REGS[nth_reg]); + nth_reg += 1; + + args[nth_arg] = Value::F64(u64::from(value_lo) | (u64::from(value_hi) << 32)); + }, } } @@ -204,27 +226,33 @@ fn call_host_function( { Ok(value) => value, Err(error) => { - log::warn!("Call into the host function '{}' failed: {error}", function.name()); - return Err(polkavm::Trap::default()); + let name = function.name(); + return Err(format!("call into the host function '{name}' failed: {error}")) }, }; if let Some(value) = value { match value { Value::I32(value) => { - caller.set_reg(Reg::A0, value as u32); + 
caller.instance.set_reg(Reg::A0, value as u64); }, Value::F32(value) => { - caller.set_reg(Reg::A0, value); - }, - Value::I64(value) => { - caller.set_reg(Reg::A0, value as u32); - caller.set_reg(Reg::A1, (value >> 32) as u32); - }, - Value::F64(value) => { - caller.set_reg(Reg::A0, value as u32); - caller.set_reg(Reg::A1, (value >> 32) as u32); + caller.instance.set_reg(Reg::A0, value as u64); }, + Value::I64(value) => + if caller.instance.is_64_bit() { + caller.instance.set_reg(Reg::A0, value as u64); + } else { + caller.instance.set_reg(Reg::A0, value as u64); + caller.instance.set_reg(Reg::A1, (value >> 32) as u64); + }, + Value::F64(value) => + if caller.instance.is_64_bit() { + caller.instance.set_reg(Reg::A0, value as u64); + } else { + caller.instance.set_reg(Reg::A0, value as u64); + caller.instance.set_reg(Reg::A1, (value >> 32) as u64); + }, } } @@ -250,12 +278,16 @@ where }, }; - let module = polkavm::Module::from_blob(&engine, &polkavm::ModuleConfig::default(), blob)?; - let mut linker = polkavm::Linker::new(&engine); + let module = + polkavm::Module::from_blob(&engine, &polkavm::ModuleConfig::default(), blob.clone())?; + + let mut linker = polkavm::Linker::new(); + for function in H::host_functions() { - linker.func_new(function.name(), |mut caller| call_host_function(&mut caller, function))?; + linker.define_untyped(function.name(), |mut caller: Caller<()>| { + call_host_function(&mut caller, function) + })?; } - let instance_pre = linker.instantiate_pre(&module)?; Ok(Box::new(InstancePre(instance_pre))) } diff --git a/substrate/client/network-gossip/src/bridge.rs b/substrate/client/network-gossip/src/bridge.rs index a4bd922a76d5..2daf1e49ee4b 100644 --- a/substrate/client/network-gossip/src/bridge.rs +++ b/substrate/client/network-gossip/src/bridge.rs @@ -220,18 +220,16 @@ impl Future for GossipEngine { }, NotificationEvent::NotificationStreamOpened { peer, handshake, .. 
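Stepping out of the diff briefly: the 32-bit fallback paths in the executor hunks above pass 64-bit values across two registers, low word first, while 64-bit guests use a single register. A standalone sketch of that split/rejoin convention follows (plain Rust, illustrative names; not the polkavm API):

```rust
// Hedged sketch of the two-register convention used by the 32-bit paths
// above: a 64-bit value travels as (lo, hi) 32-bit halves, low word first.
fn split_i64(value: i64) -> (u32, u32) {
    let v = value as u64;
    (v as u32, (v >> 32) as u32)
}

fn join_i64(lo: u32, hi: u32) -> i64 {
    (u64::from(lo) | (u64::from(hi) << 32)) as i64
}

fn main() {
    let x: i64 = -42;
    let (lo, hi) = split_i64(x);
    // Round-trips exactly, including the sign bits carried in the high word.
    assert_eq!(join_i64(lo, hi), x);
    println!("lo = {lo:#010x}, hi = {hi:#010x}");
}
```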
- } => { - let Some(role) = this.network.peer_role(peer, handshake) else { + } => + if let Some(role) = this.network.peer_role(peer, handshake) { + this.state_machine.new_peer( + &mut this.notification_service, + peer, + role, + ); + } else { log::debug!(target: "gossip", "role for {peer} couldn't be determined"); - continue - }; - - this.state_machine.new_peer( - &mut this.notification_service, - peer, - role, - ); - }, + }, NotificationEvent::NotificationStreamClosed { peer } => { this.state_machine .peer_disconnected(&mut this.notification_service, peer); diff --git a/substrate/client/network/benches/notifications_protocol.rs b/substrate/client/network/benches/notifications_protocol.rs index c1e18c7b7f47..40a810d616b5 100644 --- a/substrate/client/network/benches/notifications_protocol.rs +++ b/substrate/client/network/benches/notifications_protocol.rs @@ -25,55 +25,42 @@ use sc_network::{ FullNetworkConfiguration, MultiaddrWithPeerId, NetworkConfiguration, NonReservedPeerMode, NotificationHandshake, Params, ProtocolId, Role, SetConfig, }, - service::traits::NotificationEvent, + service::traits::{NetworkService, NotificationEvent}, Litep2pNetworkBackend, NetworkBackend, NetworkWorker, NotificationMetrics, NotificationService, - Roles, + PeerId, Roles, }; use sc_network_common::{sync::message::BlockAnnouncesHandshake, ExHashT}; -use sc_network_types::build_multiaddr; use sp_core::H256; use sp_runtime::traits::{Block as BlockT, Zero}; -use std::{ - net::{IpAddr, Ipv4Addr, TcpListener}, - str::FromStr, -}; +use std::{sync::Arc, time::Duration}; use substrate_test_runtime_client::runtime; +use tokio::{sync::Mutex, task::JoinHandle}; -const MAX_SIZE: u64 = 2u64.pow(30); -const SAMPLE_SIZE: usize = 50; -const NOTIFICATIONS: usize = 50; -const EXPONENTS: &[(u32, &'static str)] = &[ - (6, "64B"), - (9, "512B"), - (12, "4KB"), - (15, "64KB"), - (18, "256KB"), - (21, "2MB"), - (24, "16MB"), - (27, "128MB"), +const SMALL_PAYLOAD: &[(u32, usize, &'static str)] = &[ + // (Exponent of size, number of notifications, label) + (6, 100, "64B"), + (9, 100, "512B"), + (12, 100, "4KB"), + (15, 100, "64KB"), ]; - -// TODO: It's be better to bind system-provided port when initializing the worker -fn get_listen_address() -> sc_network::Multiaddr { - let ip = Ipv4Addr::from_str("127.0.0.1").unwrap(); - let listener = TcpListener::bind((IpAddr::V4(ip), 0)).unwrap(); // Bind to a random port - let local_addr = listener.local_addr().unwrap(); - let port = local_addr.port(); - - build_multiaddr!(Ip4(ip), Tcp(port)) -} +const LARGE_PAYLOAD: &[(u32, usize, &'static str)] = &[ + // (Exponent of size, number of notifications, label) + (18, 10, "256KB"), + (21, 10, "2MB"), + (24, 10, "16MB"), + (27, 10, "128MB"), +]; +const MAX_SIZE: u64 = 2u64.pow(30); fn create_network_worker( - listen_addr: sc_network::Multiaddr, -) -> (N, Box) +) -> (N, Arc, Arc>>) where B: BlockT + 'static, H: ExHashT, N: NetworkBackend, { let role = Role::Full; - let mut net_conf = NetworkConfiguration::new_local(); - net_conf.listen_addresses = vec![listen_addr]; + let net_conf = NetworkConfiguration::new_local(); let network_config = FullNetworkConfiguration::::new(&net_conf, None); let genesis_hash = runtime::Hash::zero(); let (block_announce_config, notification_service) = N::notification_config( @@ -110,96 +97,122 @@ where notification_metrics: NotificationMetrics::new(None), }) .unwrap(); + let network_service = worker.network_service(); + let notification_service = Arc::new(Mutex::new(notification_service)); - (worker, notification_service) 
+ (worker, network_service, notification_service) } -async fn run_serially(size: usize, limit: usize) +struct BenchSetup { + notification_service1: Arc>>, + notification_service2: Arc>>, + peer_id2: PeerId, + handle1: JoinHandle<()>, + handle2: JoinHandle<()>, +} + +impl Drop for BenchSetup { + fn drop(&mut self) { + self.handle1.abort(); + self.handle2.abort(); + } +} + +fn setup_workers(rt: &tokio::runtime::Runtime) -> Arc where B: BlockT + 'static, H: ExHashT, N: NetworkBackend, { - let listen_address1 = get_listen_address(); - let listen_address2 = get_listen_address(); - let (worker1, mut notification_service1) = create_network_worker::(listen_address1); - let (worker2, mut notification_service2) = - create_network_worker::(listen_address2.clone()); - let peer_id2: sc_network::PeerId = worker2.network_service().local_peer_id().into(); + let _guard = rt.enter(); - worker1 - .network_service() - .add_reserved_peer(MultiaddrWithPeerId { multiaddr: listen_address2, peer_id: peer_id2 }) - .unwrap(); + let (worker1, network_service1, notification_service1) = create_network_worker::(); + let (worker2, network_service2, notification_service2) = create_network_worker::(); + let peer_id2: sc_network::PeerId = network_service2.local_peer_id().into(); + let handle1 = tokio::spawn(worker1.run()); + let handle2 = tokio::spawn(worker2.run()); - let network1_run = worker1.run(); - let network2_run = worker2.run(); - let (tx, rx) = async_channel::bounded(10); + let ready = tokio::spawn({ + let notification_service1 = Arc::clone(¬ification_service1); + let notification_service2 = Arc::clone(¬ification_service2); - let network1 = tokio::spawn(async move { - let mut sent_counter = 0; - tokio::pin!(network1_run); - loop { - tokio::select! { - _ = &mut network1_run => {}, - event = notification_service1.next_event() => { - match event { - Some(NotificationEvent::NotificationStreamOpened { .. }) => { - sent_counter += 1; - notification_service1 - .send_async_notification(&peer_id2, vec![0; size]) - .await - .unwrap(); - }, - Some(NotificationEvent::NotificationStreamClosed { .. }) => { - if sent_counter >= limit { - break; - } - panic!("Unexpected stream closure {:?}", event); - } - event => panic!("Unexpected event {:?}", event), - }; - }, - message = rx.recv() => { - match message { - Ok(Some(_)) => { - sent_counter += 1; - notification_service1 - .send_async_notification(&peer_id2, vec![0; size]) - .await - .unwrap(); - }, - Ok(None) => break, - Err(err) => panic!("Unexpected error {:?}", err), + async move { + let listen_address2 = { + while network_service2.listen_addresses().is_empty() { + tokio::time::sleep(Duration::from_millis(10)).await; + } + network_service2.listen_addresses()[0].clone() + }; + network_service1 + .add_reserved_peer(MultiaddrWithPeerId { + multiaddr: listen_address2, + peer_id: peer_id2, + }) + .unwrap(); - } + let mut notification_service1 = notification_service1.lock().await; + let mut notification_service2 = notification_service2.lock().await; + loop { + tokio::select! { + Some(event) = notification_service1.next_event() => { + if let NotificationEvent::NotificationStreamOpened { .. } = event { + break; + } + }, + Some(event) = notification_service2.next_event() => { + if let NotificationEvent::ValidateInboundSubstream { result_tx, .. } = event { + result_tx.send(sc_network::service::traits::ValidationResult::Accept).unwrap(); + } + }, } } } }); - let network2 = tokio::spawn(async move { - let mut received_counter = 0; - tokio::pin!(network2_run); - loop { - tokio::select! 
{ - _ = &mut network2_run => {}, - event = notification_service2.next_event() => { - match event { - Some(NotificationEvent::ValidateInboundSubstream { result_tx, .. }) => { - result_tx.send(sc_network::service::traits::ValidationResult::Accept).unwrap(); - }, - Some(NotificationEvent::NotificationStreamOpened { .. }) => {}, - Some(NotificationEvent::NotificationReceived { .. }) => { - received_counter += 1; - if received_counter >= limit { - let _ = tx.send(None).await; - break - } - let _ = tx.send(Some(())).await; - }, - event => panic!("Unexpected event {:?}", event), - }; - }, + + tokio::task::block_in_place(|| { + let _ = tokio::runtime::Handle::current().block_on(ready); + }); + + Arc::new(BenchSetup { + notification_service1, + notification_service2, + peer_id2, + handle1, + handle2, + }) +} + +async fn run_serially(setup: Arc, size: usize, limit: usize) { + let (tx, rx) = async_channel::bounded(1); + let _ = tx.send(Some(())).await; + let network1 = tokio::spawn({ + let notification_service1 = Arc::clone(&setup.notification_service1); + let peer_id2 = setup.peer_id2; + async move { + let mut notification_service1 = notification_service1.lock().await; + while let Ok(message) = rx.recv().await { + let Some(_) = message else { break }; + notification_service1 + .send_async_notification(&peer_id2, vec![0; size]) + .await + .unwrap(); + } + } + }); + let network2 = tokio::spawn({ + let notification_service2 = Arc::clone(&setup.notification_service2); + async move { + let mut notification_service2 = notification_service2.lock().await; + let mut received_counter = 0; + while let Some(event) = notification_service2.next_event().await { + if let NotificationEvent::NotificationReceived { .. } = event { + received_counter += 1; + if received_counter >= limit { + let _ = tx.send(None).await; + break; + } + let _ = tx.send(Some(())).await; + } } } }); @@ -207,77 +220,34 @@ where let _ = tokio::join!(network1, network2); } -async fn run_with_backpressure(size: usize, limit: usize) -where - B: BlockT + 'static, - H: ExHashT, - N: NetworkBackend, -{ - let listen_address1 = get_listen_address(); - let listen_address2 = get_listen_address(); - let (worker1, mut notification_service1) = create_network_worker::(listen_address1); - let (worker2, mut notification_service2) = - create_network_worker::(listen_address2.clone()); - let peer_id2: sc_network::PeerId = worker2.network_service().local_peer_id().into(); - - worker1 - .network_service() - .add_reserved_peer(MultiaddrWithPeerId { multiaddr: listen_address2, peer_id: peer_id2 }) - .unwrap(); - - let network1_run = worker1.run(); - let network2_run = worker2.run(); - - let network1 = tokio::spawn(async move { - let mut sent_counter = 0; - tokio::pin!(network1_run); - loop { - tokio::select! { - _ = &mut network1_run => {}, - event = notification_service1.next_event() => { - match event { - Some(NotificationEvent::NotificationStreamOpened { .. }) => { - while sent_counter < limit { - sent_counter += 1; - notification_service1 - .send_async_notification(&peer_id2, vec![0; size]) - .await - .unwrap(); - } - }, - Some(NotificationEvent::NotificationStreamClosed { .. 
}) => { - if sent_counter != limit { panic!("Stream closed unexpectedly") } - break - }, - event => panic!("Unexpected event {:?}", event), - }; - }, +async fn run_with_backpressure(setup: Arc, size: usize, limit: usize) { + let (tx, rx) = async_channel::bounded(1); + let network1 = tokio::spawn({ + let setup = Arc::clone(&setup); + async move { + let mut notification_service1 = setup.notification_service1.lock().await; + for _ in 0..limit { + notification_service1 + .send_async_notification(&setup.peer_id2, vec![0; size]) + .await + .unwrap(); } + let _ = rx.recv().await; } }); - let network2 = tokio::spawn(async move { - let mut received_counter = 0; - tokio::pin!(network2_run); - loop { - tokio::select! { - _ = &mut network2_run => {}, - event = notification_service2.next_event() => { - match event { - Some(NotificationEvent::ValidateInboundSubstream { result_tx, .. }) => { - result_tx.send(sc_network::service::traits::ValidationResult::Accept).unwrap(); - }, - Some(NotificationEvent::NotificationStreamOpened { .. }) => {}, - Some(NotificationEvent::NotificationStreamClosed { .. }) => { - if received_counter != limit { panic!("Stream closed unexpectedly") } - break - }, - Some(NotificationEvent::NotificationReceived { .. }) => { - received_counter += 1; - if received_counter >= limit { break } - }, - event => panic!("Unexpected event {:?}", event), - }; - }, + let network2 = tokio::spawn({ + let setup = Arc::clone(&setup); + async move { + let mut notification_service2 = setup.notification_service2.lock().await; + let mut received_counter = 0; + while let Some(event) = notification_service2.next_event().await { + if let NotificationEvent::NotificationReceived { .. } = event { + received_counter += 1; + if received_counter >= limit { + let _ = tx.send(()).await; + break; + } + } } } }); @@ -285,64 +255,64 @@ where let _ = tokio::join!(network1, network2); } -fn run_benchmark(c: &mut Criterion) { +fn run_benchmark(c: &mut Criterion, payload: &[(u32, usize, &'static str)], group: &str) { let rt = tokio::runtime::Runtime::new().unwrap(); let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic); - let mut group = c.benchmark_group("notifications_benchmark"); + let mut group = c.benchmark_group(group); group.plot_config(plot_config); - for &(exponent, label) in EXPONENTS.iter() { + let libp2p_setup = setup_workers::>(&rt); + for &(exponent, limit, label) in payload.iter() { let size = 2usize.pow(exponent); - group.throughput(Throughput::Bytes(NOTIFICATIONS as u64 * size as u64)); - + group.throughput(Throughput::Bytes(limit as u64 * size as u64)); group.bench_with_input( BenchmarkId::new("libp2p/serially", label), - &(size, NOTIFICATIONS), + &(size, limit), |b, &(size, limit)| { - b.to_async(&rt).iter(|| { - run_serially::>(size, limit) - }); + b.to_async(&rt).iter(|| run_serially(Arc::clone(&libp2p_setup), size, limit)); }, ); group.bench_with_input( - BenchmarkId::new("litep2p/serially", label), - &(size, NOTIFICATIONS), + BenchmarkId::new("libp2p/with_backpressure", label), + &(size, limit), |b, &(size, limit)| { - b.to_async(&rt).iter(|| { - run_serially::( - size, limit, - ) - }); + b.to_async(&rt) + .iter(|| run_with_backpressure(Arc::clone(&libp2p_setup), size, limit)); }, ); + } + drop(libp2p_setup); + + let litep2p_setup = setup_workers::(&rt); + for &(exponent, limit, label) in payload.iter() { + let size = 2usize.pow(exponent); + group.throughput(Throughput::Bytes(limit as u64 * size as u64)); group.bench_with_input( - 
BenchmarkId::new("libp2p/with_backpressure", label), - &(size, NOTIFICATIONS), + BenchmarkId::new("litep2p/serially", label), + &(size, limit), |b, &(size, limit)| { - b.to_async(&rt).iter(|| { - run_with_backpressure::>( - size, limit, - ) - }); + b.to_async(&rt).iter(|| run_serially(Arc::clone(&litep2p_setup), size, limit)); }, ); group.bench_with_input( BenchmarkId::new("litep2p/with_backpressure", label), - &(size, NOTIFICATIONS), + &(size, limit), |b, &(size, limit)| { - b.to_async(&rt).iter(|| { - run_with_backpressure::( - size, limit, - ) - }); + b.to_async(&rt) + .iter(|| run_with_backpressure(Arc::clone(&litep2p_setup), size, limit)); }, ); } + drop(litep2p_setup); } -criterion_group! { - name = benches; - config = Criterion::default().sample_size(SAMPLE_SIZE); - targets = run_benchmark +fn run_benchmark_with_small_payload(c: &mut Criterion) { + run_benchmark(c, SMALL_PAYLOAD, "notifications_protocol/small_payload"); } + +fn run_benchmark_with_large_payload(c: &mut Criterion) { + run_benchmark(c, LARGE_PAYLOAD, "notifications_protocol/large_payload"); +} + +criterion_group!(benches, run_benchmark_with_small_payload, run_benchmark_with_large_payload); criterion_main!(benches); diff --git a/substrate/client/network/benches/request_response_protocol.rs b/substrate/client/network/benches/request_response_protocol.rs index b428d0d75ac5..85381112b753 100644 --- a/substrate/client/network/benches/request_response_protocol.rs +++ b/substrate/client/network/benches/request_response_protocol.rs @@ -25,46 +25,39 @@ use sc_network::{ FullNetworkConfiguration, IncomingRequest, NetworkConfiguration, NonReservedPeerMode, NotificationHandshake, OutgoingResponse, Params, ProtocolId, Role, SetConfig, }, + service::traits::NetworkService, IfDisconnected, Litep2pNetworkBackend, NetworkBackend, NetworkRequest, NetworkWorker, - NotificationMetrics, NotificationService, Roles, + NotificationMetrics, NotificationService, PeerId, Roles, }; use sc_network_common::{sync::message::BlockAnnouncesHandshake, ExHashT}; -use sc_network_types::build_multiaddr; use sp_core::H256; use sp_runtime::traits::{Block as BlockT, Zero}; -use std::{ - net::{IpAddr, Ipv4Addr, TcpListener}, - str::FromStr, - time::Duration, -}; +use std::{sync::Arc, time::Duration}; use substrate_test_runtime_client::runtime; +use tokio::{sync::Mutex, task::JoinHandle}; const MAX_SIZE: u64 = 2u64.pow(30); -const SAMPLE_SIZE: usize = 50; -const REQUESTS: usize = 50; -const EXPONENTS: &[(u32, &'static str)] = &[ - (6, "64B"), - (9, "512B"), - (12, "4KB"), - (15, "64KB"), - (18, "256KB"), - (21, "2MB"), - (24, "16MB"), - (27, "128MB"), +const SMALL_PAYLOAD: &[(u32, usize, &'static str)] = &[ + // (Exponent of size, number of requests, label) + (6, 100, "64B"), + (9, 100, "512B"), + (12, 100, "4KB"), + (15, 100, "64KB"), +]; +const LARGE_PAYLOAD: &[(u32, usize, &'static str)] = &[ + // (Exponent of size, number of requests, label) + (18, 10, "256KB"), + (21, 10, "2MB"), + (24, 10, "16MB"), + (27, 10, "128MB"), ]; -fn get_listen_address() -> sc_network::Multiaddr { - let ip = Ipv4Addr::from_str("127.0.0.1").unwrap(); - let listener = TcpListener::bind((IpAddr::V4(ip), 0)).unwrap(); // Bind to a random port - let local_addr = listener.local_addr().unwrap(); - let port = local_addr.port(); - - build_multiaddr!(Ip4(ip), Tcp(port)) -} - -pub fn create_network_worker( - listen_addr: sc_network::Multiaddr, -) -> (N, async_channel::Receiver, Box) +pub fn create_network_worker() -> ( + N, + Arc, + async_channel::Receiver, + Arc>>, +) where B: BlockT + 
'static, H: ExHashT, @@ -80,8 +73,7 @@ where Some(tx), ); let role = Role::Full; - let mut net_conf = NetworkConfiguration::new_local(); - net_conf.listen_addresses = vec![listen_addr]; + let net_conf = NetworkConfiguration::new_local(); let mut network_config = FullNetworkConfiguration::new(&net_conf, None); network_config.add_request_response_protocol(request_response_config); let genesis_hash = runtime::Hash::zero(); @@ -119,71 +111,115 @@ where notification_metrics: NotificationMetrics::new(None), }) .unwrap(); + let notification_service = Arc::new(Mutex::new(notification_service)); + let network_service = worker.network_service(); - (worker, rx, notification_service) + (worker, network_service, rx, notification_service) } -async fn run_serially(size: usize, limit: usize) +struct BenchSetup { + #[allow(dead_code)] + notification_service1: Arc>>, + #[allow(dead_code)] + notification_service2: Arc>>, + network_service1: Arc, + peer_id2: PeerId, + handle1: JoinHandle<()>, + handle2: JoinHandle<()>, + #[allow(dead_code)] + rx1: async_channel::Receiver, + rx2: async_channel::Receiver, +} + +impl Drop for BenchSetup { + fn drop(&mut self) { + self.handle1.abort(); + self.handle2.abort(); + } +} + +fn setup_workers(rt: &tokio::runtime::Runtime) -> Arc where B: BlockT + 'static, H: ExHashT, N: NetworkBackend, { - let listen_address1 = get_listen_address(); - let listen_address2 = get_listen_address(); - let (worker1, _rx1, _notification_service1) = create_network_worker::(listen_address1); - let service1 = worker1.network_service().clone(); - let (worker2, rx2, _notification_service2) = - create_network_worker::(listen_address2.clone()); + let _guard = rt.enter(); + + let (worker1, network_service1, rx1, notification_service1) = + create_network_worker::(); + let (worker2, network_service2, rx2, notification_service2) = + create_network_worker::(); let peer_id2 = worker2.network_service().local_peer_id(); + let handle1 = tokio::spawn(worker1.run()); + let handle2 = tokio::spawn(worker2.run()); - worker1.network_service().add_known_address(peer_id2, listen_address2.into()); + let ready = tokio::spawn({ + let network_service1 = Arc::clone(&network_service1); - let network1_run = worker1.run(); - let network2_run = worker2.run(); - let (break_tx, break_rx) = async_channel::bounded(10); - let requests = async move { - let mut sent_counter = 0; - while sent_counter < limit { - let _ = service1 - .request( - peer_id2.into(), - "/request-response/1".into(), - vec![0; 2], - None, - IfDisconnected::TryConnect, - ) - .await - .unwrap(); - sent_counter += 1; + async move { + let listen_address2 = { + while network_service2.listen_addresses().is_empty() { + tokio::time::sleep(Duration::from_millis(10)).await; + } + network_service2.listen_addresses()[0].clone() + }; + network_service1.add_known_address(peer_id2, listen_address2.into()); } - let _ = break_tx.send(()).await; - }; + }); - let network1 = tokio::spawn(async move { - tokio::pin!(requests); - tokio::pin!(network1_run); - loop { - tokio::select! 
{ - _ = &mut network1_run => {}, - _ = &mut requests => break, + tokio::task::block_in_place(|| { + let _ = tokio::runtime::Handle::current().block_on(ready); + }); + + Arc::new(BenchSetup { + notification_service1, + notification_service2, + network_service1, + peer_id2, + handle1, + handle2, + rx1, + rx2, + }) +} + +async fn run_serially(setup: Arc, size: usize, limit: usize) { + let (break_tx, break_rx) = async_channel::bounded(1); + let network1 = tokio::spawn({ + let network_service1 = Arc::clone(&setup.network_service1); + let peer_id2 = setup.peer_id2; + async move { + for _ in 0..limit { + let _ = network_service1 + .request( + peer_id2.into(), + "/request-response/1".into(), + vec![0; 2], + None, + IfDisconnected::TryConnect, + ) + .await + .unwrap(); } + let _ = break_tx.send(()).await; } }); - let network2 = tokio::spawn(async move { - tokio::pin!(network2_run); - loop { - tokio::select! { - _ = &mut network2_run => {}, - res = rx2.recv() => { - let IncomingRequest { pending_response, .. } = res.unwrap(); - pending_response.send(OutgoingResponse { - result: Ok(vec![0; size]), - reputation_changes: vec![], - sent_feedback: None, - }).unwrap(); - }, - _ = break_rx.recv() => break, + let network2 = tokio::spawn({ + let rx2 = setup.rx2.clone(); + async move { + loop { + tokio::select! { + res = rx2.recv() => { + let IncomingRequest { pending_response, .. } = res.unwrap(); + pending_response.send(OutgoingResponse { + result: Ok(vec![0; size]), + reputation_changes: vec![], + sent_feedback: None, + }).unwrap(); + }, + _ = break_rx.recv() => break, + } } } }); @@ -194,29 +230,12 @@ where // The libp2p request-response implementation does not provide any backpressure feedback. // So this benchmark is useless until we implement it for litep2p. #[allow(dead_code)] -async fn run_with_backpressure(size: usize, limit: usize) -where - B: BlockT + 'static, - H: ExHashT, - N: NetworkBackend, -{ - let listen_address1 = get_listen_address(); - let listen_address2 = get_listen_address(); - let (worker1, _rx1, _notification_service1) = create_network_worker::(listen_address1); - let service1 = worker1.network_service().clone(); - let (worker2, rx2, _notification_service2) = - create_network_worker::(listen_address2.clone()); - let peer_id2 = worker2.network_service().local_peer_id(); - - worker1.network_service().add_known_address(peer_id2, listen_address2.into()); - - let network1_run = worker1.run(); - let network2_run = worker2.run(); - let (break_tx, break_rx) = async_channel::bounded(10); +async fn run_with_backpressure(setup: Arc, size: usize, limit: usize) { + let (break_tx, break_rx) = async_channel::bounded(1); let requests = futures::future::join_all((0..limit).into_iter().map(|_| { let (tx, rx) = futures::channel::oneshot::channel(); - service1.start_request( - peer_id2.into(), + setup.network_service1.start_request( + setup.peer_id2.into(), "/request-response/1".into(), vec![0; 8], None, @@ -227,77 +246,72 @@ where })); let network1 = tokio::spawn(async move { - tokio::pin!(requests); - tokio::pin!(network1_run); - loop { - tokio::select! { - _ = &mut network1_run => {}, - responses = &mut requests => { - for res in responses { - res.unwrap().unwrap(); - } - let _ = break_tx.send(()).await; - break; - }, - } + let responses = requests.await; + for res in responses { + res.unwrap().unwrap(); } + let _ = break_tx.send(()).await; }); let network2 = tokio::spawn(async move { - tokio::pin!(network2_run); - loop { - tokio::select! 
{ - _ = &mut network2_run => {}, - res = rx2.recv() => { - let IncomingRequest { pending_response, .. } = res.unwrap(); - pending_response.send(OutgoingResponse { - result: Ok(vec![0; size]), - reputation_changes: vec![], - sent_feedback: None, - }).unwrap(); - }, - _ = break_rx.recv() => break, - } + for _ in 0..limit { + let IncomingRequest { pending_response, .. } = setup.rx2.recv().await.unwrap(); + pending_response + .send(OutgoingResponse { + result: Ok(vec![0; size]), + reputation_changes: vec![], + sent_feedback: None, + }) + .unwrap(); } + break_rx.recv().await }); let _ = tokio::join!(network1, network2); } -fn run_benchmark(c: &mut Criterion) { +fn run_benchmark(c: &mut Criterion, payload: &[(u32, usize, &'static str)], group: &str) { let rt = tokio::runtime::Runtime::new().unwrap(); let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic); - let mut group = c.benchmark_group("request_response_benchmark"); + let mut group = c.benchmark_group(group); group.plot_config(plot_config); - for &(exponent, label) in EXPONENTS.iter() { + let libp2p_setup = setup_workers::>(&rt); + for &(exponent, limit, label) in payload.iter() { let size = 2usize.pow(exponent); - group.throughput(Throughput::Bytes(REQUESTS as u64 * size as u64)); + group.throughput(Throughput::Bytes(limit as u64 * size as u64)); group.bench_with_input( BenchmarkId::new("libp2p/serially", label), - &(size, REQUESTS), - |b, &(size, limit)| { - b.to_async(&rt).iter(|| { - run_serially::>(size, limit) - }); - }, - ); - group.bench_with_input( - BenchmarkId::new("litep2p/serially", label), - &(size, REQUESTS), + &(size, limit), |b, &(size, limit)| { - b.to_async(&rt).iter(|| { - run_serially::( - size, limit, - ) - }); + b.to_async(&rt).iter(|| run_serially(Arc::clone(&libp2p_setup), size, limit)); }, ); } + drop(libp2p_setup); + + // TODO: NetworkRequest::request should be implemented for Litep2pNetworkService + let litep2p_setup = setup_workers::(&rt); + // for &(exponent, limit, label) in payload.iter() { + // let size = 2usize.pow(exponent); + // group.throughput(Throughput::Bytes(limit as u64 * size as u64)); + // group.bench_with_input( + // BenchmarkId::new("litep2p/serially", label), + // &(size, limit), + // |b, &(size, limit)| { + // b.to_async(&rt).iter(|| run_serially(Arc::clone(&litep2p_setup), size, limit)); + // }, + // ); + // } + drop(litep2p_setup); } -criterion_group! { - name = benches; - config = Criterion::default().sample_size(SAMPLE_SIZE); - targets = run_benchmark +fn run_benchmark_with_small_payload(c: &mut Criterion) { + run_benchmark(c, SMALL_PAYLOAD, "request_response_benchmark/small_payload"); } + +fn run_benchmark_with_large_payload(c: &mut Criterion) { + run_benchmark(c, LARGE_PAYLOAD, "request_response_benchmark/large_payload"); +} + +criterion_group!(benches, run_benchmark_with_small_payload, run_benchmark_with_large_payload); criterion_main!(benches); diff --git a/substrate/client/network/src/behaviour.rs b/substrate/client/network/src/behaviour.rs index dbb72381b660..e2a91e961668 100644 --- a/substrate/client/network/src/behaviour.rs +++ b/substrate/client/network/src/behaviour.rs @@ -68,6 +68,7 @@ pub struct Behaviour { } /// Event generated by `Behaviour`. +#[derive(Debug)] pub enum BehaviourOut { /// Started a random iterative Kademlia discovery query. RandomKademliaStarted, @@ -310,6 +311,22 @@ impl Behaviour { ) { self.discovery.store_record(record_key, record_value, publisher, expires); } + + /// Start providing `key` on the DHT. 
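+ ///
+ /// A minimal usage sketch (illustrative only, not part of this patch; assumes a
+ /// `behaviour: &mut Behaviour<B>` and a `key: RecordKey` already in scope):
+ ///
+ /// ```ignore
+ /// behaviour.start_providing(key.clone());
+ /// // ...serve the content while registered as a provider...
+ /// behaviour.stop_providing(&key);
+ /// ```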
+ pub fn start_providing(&mut self, key: RecordKey) { + self.discovery.start_providing(key) + } + + /// Stop providing `key` on the DHT. + pub fn stop_providing(&mut self, key: &RecordKey) { + self.discovery.stop_providing(key) + } + + /// Start searching for providers on the DHT. Will later produce either a `ProvidersFound` + /// or `ProvidersNotFound` event. + pub fn get_providers(&mut self, key: RecordKey) { + self.discovery.get_providers(key) + } } impl From for BehaviourOut { @@ -387,6 +404,17 @@ impl From for BehaviourOut { ), DiscoveryOut::ValuePutFailed(key, duration) => BehaviourOut::Dht(DhtEvent::ValuePutFailed(key.into()), Some(duration)), + DiscoveryOut::StartProvidingFailed(key) => + BehaviourOut::Dht(DhtEvent::StartProvidingFailed(key.into()), None), + DiscoveryOut::ProvidersFound(key, providers, duration) => BehaviourOut::Dht( + DhtEvent::ProvidersFound( + key.into(), + providers.into_iter().map(Into::into).collect(), + ), + Some(duration), + ), + DiscoveryOut::ProvidersNotFound(key, duration) => + BehaviourOut::Dht(DhtEvent::ProvidersNotFound(key.into()), Some(duration)), DiscoveryOut::RandomKademliaStarted => BehaviourOut::RandomKademliaStarted, } } diff --git a/substrate/client/network/src/discovery.rs b/substrate/client/network/src/discovery.rs index 8080bda9a574..917449cf228c 100644 --- a/substrate/client/network/src/discovery.rs +++ b/substrate/client/network/src/discovery.rs @@ -53,13 +53,13 @@ use futures::prelude::*; use futures_timer::Delay; use ip_network::IpNetwork; use libp2p::{ - core::{Endpoint, Multiaddr}, + core::{transport::PortUse, Endpoint, Multiaddr}, kad::{ self, - record::store::{MemoryStore, RecordStore}, + store::{MemoryStore, RecordStore}, Behaviour as Kademlia, BucketInserts, Config as KademliaConfig, Event as KademliaEvent, - GetClosestPeersError, GetRecordOk, PeerRecord, QueryId, QueryResult, Quorum, Record, - RecordKey, + Event, GetClosestPeersError, GetProvidersError, GetProvidersOk, GetRecordOk, PeerRecord, + QueryId, QueryResult, Quorum, Record, RecordKey, }, mdns::{self, tokio::Behaviour as TokioMdns}, multiaddr::Protocol, @@ -68,8 +68,8 @@ use libp2p::{ toggle::{Toggle, ToggleConnectionHandler}, DialFailure, ExternalAddrConfirmed, FromSwarm, }, - ConnectionDenied, ConnectionId, DialError, NetworkBehaviour, PollParameters, - StreamProtocol, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + ConnectionDenied, ConnectionId, DialError, NetworkBehaviour, StreamProtocol, THandler, + THandlerInEvent, THandlerOutEvent, ToSwarm, }, PeerId, }; @@ -214,23 +214,14 @@ impl DiscoveryConfig { enable_mdns, kademlia_disjoint_query_paths, kademlia_protocol, - kademlia_legacy_protocol, + kademlia_legacy_protocol: _, kademlia_replication_factor, } = self; let kademlia = if let Some(ref kademlia_protocol) = kademlia_protocol { - let mut config = KademliaConfig::default(); + let mut config = KademliaConfig::new(kademlia_protocol.clone()); config.set_replication_factor(kademlia_replication_factor); - // Populate kad with both the legacy and the new protocol names. 
- // Remove the legacy protocol:
- // https://github.com/paritytech/polkadot-sdk/issues/504
- let kademlia_protocols = if let Some(legacy_protocol) = kademlia_legacy_protocol {
- vec![kademlia_protocol.clone(), legacy_protocol]
- } else {
- vec![kademlia_protocol.clone()]
- };
- config.set_protocol_names(kademlia_protocols.into_iter().map(Into::into).collect());

config.set_record_filtering(libp2p::kad::StoreInserts::FilterBoth);
@@ -466,6 +457,31 @@ impl DiscoveryBehaviour {
}
}
}
+
+ /// Register as a content provider on the DHT for `key`.
+ pub fn start_providing(&mut self, key: RecordKey) {
+ if let Some(kad) = self.kademlia.as_mut() {
+ if let Err(e) = kad.start_providing(key.clone()) {
+ warn!(target: "sub-libp2p", "Libp2p => Failed to start providing {key:?}: {e}.");
+ self.pending_events.push_back(DiscoveryOut::StartProvidingFailed(key));
+ }
+ }
+ }
+
+ /// Deregister as a content provider on the DHT for `key`.
+ pub fn stop_providing(&mut self, key: &RecordKey) {
+ if let Some(kad) = self.kademlia.as_mut() {
+ kad.stop_providing(key);
+ }
+ }
+
+ /// Get content providers for `key` from the DHT.
+ pub fn get_providers(&mut self, key: RecordKey) {
+ if let Some(kad) = self.kademlia.as_mut() {
+ kad.get_providers(key);
+ }
+ }
+
/// Store a record in the Kademlia record store.
pub fn store_record(
&mut self,
@@ -581,6 +597,15 @@ pub enum DiscoveryOut {
/// Returning the corresponding key as well as the request duration.
ValuePutFailed(RecordKey, Duration),
+ /// Failed to start providing a key.
+ StartProvidingFailed(RecordKey),
+
+ /// The DHT yielded results for the providers request.
+ ProvidersFound(RecordKey, HashSet, Duration),
+
+ /// Providers for the requested key were not found in the DHT.
+ ProvidersNotFound(RecordKey, Duration),
+
/// Started a random Kademlia query.
///
/// Only happens if [`DiscoveryConfig::with_dht_random_walk`] has been configured to `true`.
@@ -613,12 +638,14 @@ impl NetworkBehaviour for DiscoveryBehaviour {
peer: PeerId,
addr: &Multiaddr,
role_override: Endpoint,
+ port_use: PortUse,
) -> Result, ConnectionDenied> {
self.kademlia.handle_established_outbound_connection(
connection_id,
peer,
addr,
role_override,
+ port_use,
)
}
@@ -690,7 +717,7 @@ impl NetworkBehaviour for DiscoveryBehaviour {
Ok(list.into_iter().collect())
}
- fn on_swarm_event(&mut self, event: FromSwarm) {
+ fn on_swarm_event(&mut self, event: FromSwarm) {
match event {
FromSwarm::ConnectionEstablished(e) => {
self.num_connections += 1;
@@ -777,6 +804,10 @@ impl NetworkBehaviour for DiscoveryBehaviour {
self.kademlia.on_swarm_event(FromSwarm::ExternalAddrConfirmed(e));
},
+ event => {
+ debug!(target: "sub-libp2p", "New unknown `FromSwarm` libp2p event: {event:?}");
+ self.kademlia.on_swarm_event(event);
+ },
}
}
@@ -789,11 +820,7 @@ impl NetworkBehaviour for DiscoveryBehaviour {
self.kademlia.on_connection_handler_event(peer_id, connection_id, event);
}
- fn poll(
- &mut self,
- cx: &mut Context,
- params: &mut impl PollParameters,
- ) -> Poll>> {
+ fn poll(&mut self, cx: &mut Context) -> Poll>> {
// Immediately process the content of `discovered`.
if let Some(ev) = self.pending_events.pop_front() {
return Poll::Ready(ToSwarm::GenerateEvent(ev))
}
@@ -836,7 +863,7 @@ impl NetworkBehaviour for DiscoveryBehaviour {
}
}
- while let Poll::Ready(ev) = self.kademlia.poll(cx, params) {
+ while let Poll::Ready(ev) = self.kademlia.poll(cx) {
match ev {
ToSwarm::GenerateEvent(ev) => match ev {
KademliaEvent::RoutingUpdated { peer, ..
} => { @@ -982,6 +1009,56 @@ impl NetworkBehaviour for DiscoveryBehaviour { }; return Poll::Ready(ToSwarm::GenerateEvent(ev)) }, + KademliaEvent::OutboundQueryProgressed { + result: QueryResult::GetProviders(res), + stats, + id, + .. + } => { + let ev = match res { + Ok(GetProvidersOk::FoundProviders { key, providers }) => { + debug!( + target: "sub-libp2p", + "Libp2p => Found providers {:?} for key {:?}, id {:?}, stats {:?}", + providers, + key, + id, + stats, + ); + + DiscoveryOut::ProvidersFound( + key, + providers, + stats.duration().unwrap_or_default(), + ) + }, + Ok(GetProvidersOk::FinishedWithNoAdditionalRecord { + closest_peers: _, + }) => { + debug!( + target: "sub-libp2p", + "Libp2p => Finished with no additional providers {:?}, stats {:?}, took {:?} ms", + id, + stats, + stats.duration().map(|val| val.as_millis()) + ); + + continue + }, + Err(GetProvidersError::Timeout { key, closest_peers: _ }) => { + debug!( + target: "sub-libp2p", + "Libp2p => Failed to get providers for {key:?} due to timeout.", + ); + + DiscoveryOut::ProvidersNotFound( + key, + stats.duration().unwrap_or_default(), + ) + }, + }; + return Poll::Ready(ToSwarm::GenerateEvent(ev)) + }, KademliaEvent::OutboundQueryProgressed { result: QueryResult::PutRecord(res), stats, @@ -1019,30 +1096,38 @@ impl NetworkBehaviour for DiscoveryBehaviour { e.key(), e, ), }, + KademliaEvent::OutboundQueryProgressed { + result: QueryResult::Bootstrap(res), + .. + } => match res { + Ok(ok) => debug!( + target: "sub-libp2p", + "Libp2p => DHT bootstrap progressed: {ok:?}", + ), + Err(e) => warn!( + target: "sub-libp2p", + "Libp2p => DHT bootstrap error: {e:?}", + ), + }, // We never start any other type of query. KademliaEvent::OutboundQueryProgressed { result: e, .. } => { warn!(target: "sub-libp2p", "Libp2p => Unhandled Kademlia event: {:?}", e) }, + Event::ModeChanged { new_mode } => { + debug!(target: "sub-libp2p", "Libp2p => Kademlia mode changed: {new_mode}") + }, }, ToSwarm::Dial { opts } => return Poll::Ready(ToSwarm::Dial { opts }), - ToSwarm::NotifyHandler { peer_id, handler, event } => - return Poll::Ready(ToSwarm::NotifyHandler { peer_id, handler, event }), - ToSwarm::CloseConnection { peer_id, connection } => - return Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }), - ToSwarm::NewExternalAddrCandidate(observed) => - return Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)), - ToSwarm::ExternalAddrConfirmed(addr) => - return Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)), - ToSwarm::ExternalAddrExpired(addr) => - return Poll::Ready(ToSwarm::ExternalAddrExpired(addr)), - ToSwarm::ListenOn { opts } => return Poll::Ready(ToSwarm::ListenOn { opts }), - ToSwarm::RemoveListener { id } => - return Poll::Ready(ToSwarm::RemoveListener { id }), + event => { + return Poll::Ready(event.map_out(|_| { + unreachable!("`GenerateEvent` is handled in a branch above; qed") + })); + }, } } // Poll mDNS. - while let Poll::Ready(ev) = self.mdns.poll(cx, params) { + while let Poll::Ready(ev) = self.mdns.poll(cx) { match ev { ToSwarm::GenerateEvent(event) => match event { mdns::Event::Discovered(list) => { @@ -1064,17 +1149,17 @@ impl NetworkBehaviour for DiscoveryBehaviour { }, // `event` is an enum with no variant ToSwarm::NotifyHandler { event, .. 
} => match event {}, - ToSwarm::CloseConnection { peer_id, connection } => - return Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }), - ToSwarm::NewExternalAddrCandidate(observed) => - return Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)), - ToSwarm::ExternalAddrConfirmed(addr) => - return Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)), - ToSwarm::ExternalAddrExpired(addr) => - return Poll::Ready(ToSwarm::ExternalAddrExpired(addr)), - ToSwarm::ListenOn { opts } => return Poll::Ready(ToSwarm::ListenOn { opts }), - ToSwarm::RemoveListener { id } => - return Poll::Ready(ToSwarm::RemoveListener { id }), + event => { + return Poll::Ready( + event + .map_in(|_| { + unreachable!("`NotifyHandler` is handled in a branch above; qed") + }) + .map_out(|_| { + unreachable!("`GenerateEvent` is handled in a branch above; qed") + }), + ); + }, } } @@ -1117,21 +1202,14 @@ mod tests { }, identity::Keypair, noise, - swarm::{Executor, Swarm, SwarmEvent}, + swarm::{Swarm, SwarmEvent}, yamux, Multiaddr, }; use sp_core::hash::H256; - use std::{collections::HashSet, pin::Pin, task::Poll}; + use std::{collections::HashSet, task::Poll, time::Duration}; - struct TokioExecutor(tokio::runtime::Runtime); - impl Executor for TokioExecutor { - fn exec(&self, f: Pin + Send>>) { - let _ = self.0.spawn(f); - } - } - - #[test] - fn discovery_working() { + #[tokio::test] + async fn discovery_working() { let mut first_swarm_peer_id_and_addr = None; let genesis_hash = H256::from_low_u64_be(1); @@ -1142,42 +1220,40 @@ mod tests { // the first swarm via `with_permanent_addresses`. let mut swarms = (0..25) .map(|i| { - let keypair = Keypair::generate_ed25519(); - - let transport = MemoryTransport::new() - .upgrade(upgrade::Version::V1) - .authenticate(noise::Config::new(&keypair).unwrap()) - .multiplex(yamux::Config::default()) - .boxed(); - - let behaviour = { - let mut config = DiscoveryConfig::new(keypair.public().to_peer_id()); - config - .with_permanent_addresses(first_swarm_peer_id_and_addr.clone()) - .allow_private_ip(true) - .allow_non_globals_in_dht(true) - .discovery_limit(50) - .with_kademlia(genesis_hash, fork_id, &protocol_id); - - config.finish() - }; - - let runtime = tokio::runtime::Runtime::new().unwrap(); - #[allow(deprecated)] - let mut swarm = libp2p::swarm::SwarmBuilder::with_executor( - transport, - behaviour, - keypair.public().to_peer_id(), - TokioExecutor(runtime), - ) - .build(); + let mut swarm = libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_other_transport(|keypair| { + MemoryTransport::new() + .upgrade(upgrade::Version::V1) + .authenticate(noise::Config::new(&keypair).unwrap()) + .multiplex(yamux::Config::default()) + .boxed() + }) + .unwrap() + .with_behaviour(|keypair| { + let mut config = DiscoveryConfig::new(keypair.public().to_peer_id()); + config + .with_permanent_addresses(first_swarm_peer_id_and_addr.clone()) + .allow_private_ip(true) + .allow_non_globals_in_dht(true) + .discovery_limit(50) + .with_kademlia(genesis_hash, fork_id, &protocol_id); + + config.finish() + }) + .unwrap() + .with_swarm_config(|config| { + // This is taken care of by notification protocols in non-test environment + config.with_idle_connection_timeout(Duration::from_secs(10)) + }) + .build(); let listen_addr: Multiaddr = format!("/memory/{}", rand::random::()).parse().unwrap(); if i == 0 { first_swarm_peer_id_and_addr = - Some((keypair.public().to_peer_id(), listen_addr.clone())) + Some((*swarm.local_peer_id(), listen_addr.clone())) } 
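// Note: the swarms above are assembled with libp2p's fluent `SwarmBuilder` API
// (`with_new_identity` -> `with_tokio` -> `with_other_transport` -> `with_behaviour`
// -> `build`), replacing the removed `TokioExecutor` wrapper and the deprecated
// executor-based `SwarmBuilder::with_executor` constructor.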
swarm.listen_on(listen_addr.clone()).unwrap();
@@ -1264,7 +1340,7 @@ mod tests {
}
});
- futures::executor::block_on(fut);
+ fut.await
}
#[test]
diff --git a/substrate/client/network/src/event.rs b/substrate/client/network/src/event.rs
index 626cf516a7ec..e8ec1eee2545 100644
--- a/substrate/client/network/src/event.rs
+++ b/substrate/client/network/src/event.rs
@@ -45,8 +45,17 @@ pub enum DhtEvent {
/// An error has occurred while putting a record into the DHT.
ValuePutFailed(Key),
+ /// An error occurred while registering as a content provider on the DHT.
+ StartProvidingFailed(Key),
+
/// The DHT received a put record request.
PutRecordRequest(Key, Vec, Option, Option),
+
+ /// The providers for [`Key`] were found.
+ ProvidersFound(Key, Vec),
+
+ /// The providers for [`Key`] were not found.
+ ProvidersNotFound(Key),
}
/// Type for events generated by networking layer.
diff --git a/substrate/client/network/src/litep2p/discovery.rs b/substrate/client/network/src/litep2p/discovery.rs
index 3a9454e317cc..2bea2e5a80dc 100644
--- a/substrate/client/network/src/litep2p/discovery.rs
+++ b/substrate/client/network/src/litep2p/discovery.rs
@@ -32,7 +32,7 @@ use litep2p::{
libp2p::{
identify::{Config as IdentifyConfig, IdentifyEvent},
kademlia::{
- Config as KademliaConfig, ConfigBuilder as KademliaConfigBuilder,
+ Config as KademliaConfig, ConfigBuilder as KademliaConfigBuilder, ContentProvider,
IncomingRecordValidationMode, KademliaEvent, KademliaHandle, QueryId, Quorum, Record,
RecordKey, RecordsType,
},
@@ -144,6 +144,14 @@ pub enum DiscoveryEvent {
query_id: QueryId,
},
+ /// Providers were successfully retrieved.
+ GetProvidersSuccess {
+ /// Query ID.
+ query_id: QueryId,
+ /// Found providers sorted by distance to provided key.
+ providers: Vec,
+ },
+
/// Query failed.
QueryFailed {
/// Query ID.
@@ -407,6 +415,21 @@ impl Discovery {
.await;
}
+ /// Start providing `key`.
+ pub async fn start_providing(&mut self, key: KademliaKey) {
+ self.kademlia_handle.start_providing(key.into()).await;
+ }
+
+ /// Stop providing `key`.
+ pub async fn stop_providing(&mut self, key: KademliaKey) {
+ self.kademlia_handle.stop_providing(key.into()).await;
+ }
+
+ /// Get providers for `key`.
+ pub async fn get_providers(&mut self, key: KademliaKey) -> QueryId {
+ self.kademlia_handle.get_providers(key.into()).await
+ }
+
/// Check if the observed address is a known address.
fn is_known_address(known: &Multiaddr, observed: &Multiaddr) -> bool {
let mut known = known.iter();
@@ -581,8 +604,22 @@ impl Stream for Discovery {
return Poll::Ready(Some(DiscoveryEvent::IncomingRecord { record }))
},
- // Content provider events are ignored for now.
- Poll::Ready(Some(KademliaEvent::GetProvidersSuccess { .. })) |
+ Poll::Ready(Some(KademliaEvent::GetProvidersSuccess {
+ provided_key,
+ providers,
+ query_id,
+ })) => {
+ log::trace!(
+ target: LOG_TARGET,
+ "`GET_PROVIDERS` for {query_id:?} with {provided_key:?} yielded {providers:?}",
+ );
+
+ return Poll::Ready(Some(DiscoveryEvent::GetProvidersSuccess {
+ query_id,
+ providers,
+ }))
+ },
+ // We do not validate incoming providers.
Poll::Ready(Some(KademliaEvent::IncomingProvider { .. })) => {},
}
diff --git a/substrate/client/network/src/litep2p/mod.rs b/substrate/client/network/src/litep2p/mod.rs
index 10cf9f4da36d..52b2970525df 100644
--- a/substrate/client/network/src/litep2p/mod.rs
+++ b/substrate/client/network/src/litep2p/mod.rs
@@ -143,6 +143,17 @@ struct ConnectionContext {
num_connections: usize,
}
+/// Kademlia query we are tracking.
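+///
+/// Consolidating the former `pending_get_values` and `pending_put_values` maps into a
+/// single map of `QueryId` to `KadQuery` lets the event loop dispatch on the query kind
+/// once a result or failure arrives. An illustrative sketch of that dispatch (not the
+/// verbatim handler below):
+///
+/// ```ignore
+/// match pending_queries.remove(&query_id) {
+///     Some(KadQuery::GetValue(key, started)) => { /* emit value-found DHT event, record metrics */ },
+///     Some(KadQuery::PutValue(key, started)) => { /* emit value-put DHT event, record metrics */ },
+///     Some(KadQuery::GetProviders(key, started)) => { /* emit providers-found DHT event */ },
+///     None => { /* unknown query id: log and ignore */ },
+/// }
+/// ```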
+#[derive(Debug)] +enum KadQuery { + /// `GET_VALUE` query for key and when it was initiated. + GetValue(RecordKey, Instant), + /// `PUT_VALUE` query for key and when it was initiated. + PutValue(RecordKey, Instant), + /// `GET_PROVIDERS` query for key and when it was initiated. + GetProviders(RecordKey, Instant), +} + /// Networking backend for `litep2p`. pub struct Litep2pNetworkBackend { /// Main `litep2p` object. @@ -157,11 +168,8 @@ pub struct Litep2pNetworkBackend { /// `Peerset` handles to notification protocols. peerset_handles: HashMap, - /// Pending `GET_VALUE` queries. - pending_get_values: HashMap, - - /// Pending `PUT_VALUE` queries. - pending_put_values: HashMap, + /// Pending Kademlia queries. + pending_queries: HashMap, /// Discovery. discovery: Discovery, @@ -615,8 +623,7 @@ impl NetworkBackend for Litep2pNetworkBac peerset_handles: notif_protocols, num_connected, discovery, - pending_put_values: HashMap::new(), - pending_get_values: HashMap::new(), + pending_queries: HashMap::new(), peerstore_handle: peer_store_handle, block_announce_protocol, event_streams: out_events::OutChannels::new(None)?, @@ -704,21 +711,30 @@ impl NetworkBackend for Litep2pNetworkBac Some(command) => match command { NetworkServiceCommand::GetValue{ key } => { let query_id = self.discovery.get_value(key.clone()).await; - self.pending_get_values.insert(query_id, (key, Instant::now())); + self.pending_queries.insert(query_id, KadQuery::GetValue(key, Instant::now())); } NetworkServiceCommand::PutValue { key, value } => { let query_id = self.discovery.put_value(key.clone(), value).await; - self.pending_put_values.insert(query_id, (key, Instant::now())); + self.pending_queries.insert(query_id, KadQuery::PutValue(key, Instant::now())); } NetworkServiceCommand::PutValueTo { record, peers, update_local_storage} => { let kademlia_key = record.key.clone(); let query_id = self.discovery.put_value_to_peers(record.into(), peers, update_local_storage).await; - self.pending_put_values.insert(query_id, (kademlia_key, Instant::now())); + self.pending_queries.insert(query_id, KadQuery::PutValue(kademlia_key, Instant::now())); } - NetworkServiceCommand::StoreRecord { key, value, publisher, expires } => { self.discovery.store_record(key, value, publisher.map(Into::into), expires).await; } + NetworkServiceCommand::StartProviding { key } => { + self.discovery.start_providing(key).await; + } + NetworkServiceCommand::StopProviding { key } => { + self.discovery.stop_providing(key).await; + } + NetworkServiceCommand::GetProviders { key } => { + let query_id = self.discovery.get_providers(key.clone()).await; + self.pending_queries.insert(query_id, KadQuery::GetProviders(key, Instant::now())); + } NetworkServiceCommand::EventStream { tx } => { self.event_streams.push(tx); } @@ -753,7 +769,7 @@ impl NetworkBackend for Litep2pNetworkBac } if self.litep2p.add_known_address(peer.into(), iter::once(address.clone())) == 0usize { - log::warn!( + log::debug!( target: LOG_TARGET, "couldn't add known address ({address}) for {peer:?}, unsupported transport" ); @@ -821,12 +837,8 @@ impl NetworkBackend for Litep2pNetworkBac } } Some(DiscoveryEvent::GetRecordSuccess { query_id, records }) => { - match self.pending_get_values.remove(&query_id) { - None => log::warn!( - target: LOG_TARGET, - "`GET_VALUE` succeeded for a non-existent query", - ), - Some((key, started)) => { + match self.pending_queries.remove(&query_id) { + Some(KadQuery::GetValue(key, started)) => { log::trace!( target: LOG_TARGET, "`GET_VALUE` for {:?} ({query_id:?}) 
succeeded", @@ -848,16 +860,19 @@ impl NetworkBackend for Litep2pNetworkBac .with_label_values(&["value-get"]) .observe(started.elapsed().as_secs_f64()); } - } + }, + query => { + log::error!( + target: LOG_TARGET, + "Missing/invalid pending query for `GET_VALUE`: {query:?}" + ); + debug_assert!(false); + }, } } Some(DiscoveryEvent::PutRecordSuccess { query_id }) => { - match self.pending_put_values.remove(&query_id) { - None => log::warn!( - target: LOG_TARGET, - "`PUT_VALUE` succeeded for a non-existent query", - ), - Some((key, started)) => { + match self.pending_queries.remove(&query_id) { + Some(KadQuery::PutValue(key, started)) => { log::trace!( target: LOG_TARGET, "`PUT_VALUE` for {key:?} ({query_id:?}) succeeded", @@ -873,35 +888,50 @@ impl NetworkBackend for Litep2pNetworkBac .with_label_values(&["value-put"]) .observe(started.elapsed().as_secs_f64()); } + }, + query => { + log::error!( + target: LOG_TARGET, + "Missing/invalid pending query for `PUT_VALUE`: {query:?}" + ); + debug_assert!(false); } } } - Some(DiscoveryEvent::QueryFailed { query_id }) => { - match self.pending_get_values.remove(&query_id) { - None => match self.pending_put_values.remove(&query_id) { - None => log::warn!( + Some(DiscoveryEvent::GetProvidersSuccess { query_id, providers }) => { + match self.pending_queries.remove(&query_id) { + Some(KadQuery::GetProviders(key, started)) => { + log::trace!( target: LOG_TARGET, - "non-existent query failed ({query_id:?})", - ), - Some((key, started)) => { - log::debug!( - target: LOG_TARGET, - "`PUT_VALUE` ({query_id:?}) failed for key {key:?}", - ); + "`GET_PROVIDERS` for {key:?} ({query_id:?}) succeeded", + ); - self.event_streams.send(Event::Dht( - DhtEvent::ValuePutFailed(key) - )); + self.event_streams.send(Event::Dht( + DhtEvent::ProvidersFound( + key.into(), + providers.into_iter().map(|p| p.peer.into()).collect() + ) + )); - if let Some(ref metrics) = self.metrics { - metrics - .kademlia_query_duration - .with_label_values(&["value-put-failed"]) - .observe(started.elapsed().as_secs_f64()); - } + if let Some(ref metrics) = self.metrics { + metrics + .kademlia_query_duration + .with_label_values(&["providers-get"]) + .observe(started.elapsed().as_secs_f64()); } + }, + query => { + log::error!( + target: LOG_TARGET, + "Missing/invalid pending query for `GET_PROVIDERS`: {query:?}" + ); + debug_assert!(false); } - Some((key, started)) => { + } + } + Some(DiscoveryEvent::QueryFailed { query_id }) => { + match self.pending_queries.remove(&query_id) { + Some(KadQuery::GetValue(key, started)) => { log::debug!( target: LOG_TARGET, "`GET_VALUE` ({query_id:?}) failed for key {key:?}", @@ -917,6 +947,46 @@ impl NetworkBackend for Litep2pNetworkBac .with_label_values(&["value-get-failed"]) .observe(started.elapsed().as_secs_f64()); } + }, + Some(KadQuery::PutValue(key, started)) => { + log::debug!( + target: LOG_TARGET, + "`PUT_VALUE` ({query_id:?}) failed for key {key:?}", + ); + + self.event_streams.send(Event::Dht( + DhtEvent::ValuePutFailed(key) + )); + + if let Some(ref metrics) = self.metrics { + metrics + .kademlia_query_duration + .with_label_values(&["value-put-failed"]) + .observe(started.elapsed().as_secs_f64()); + } + }, + Some(KadQuery::GetProviders(key, started)) => { + log::debug!( + target: LOG_TARGET, + "`GET_PROVIDERS` ({query_id:?}) failed for key {key:?}" + ); + + self.event_streams.send(Event::Dht( + DhtEvent::ProvidersNotFound(key) + )); + + if let Some(ref metrics) = self.metrics { + metrics + .kademlia_query_duration + 
.with_label_values(&["providers-get-failed"]) + .observe(started.elapsed().as_secs_f64()); + } + }, + None => { + log::warn!( + target: LOG_TARGET, + "non-existent query failed ({query_id:?})", + ); } } } @@ -986,7 +1056,15 @@ impl NetworkBackend for Litep2pNetworkBac let direction = match endpoint { Endpoint::Dialer { .. } => "out", - Endpoint::Listener { .. } => "in", + Endpoint::Listener { .. } => { + // Increment incoming connections counter. + // + // Note: For litep2p these are represented by established negotiated connections, + // while for libp2p (legacy) these represent not-yet-negotiated connections. + metrics.incoming_connections_total.inc(); + + "in" + }, }; metrics.connections_opened_total.with_label_values(&[direction]).inc(); @@ -1058,6 +1136,7 @@ impl NetworkBackend for Litep2pNetworkBac NegotiationError::ParseError(_) => "parse-error", NegotiationError::IoError(_) => "io-error", NegotiationError::WebSocket(_) => "webscoket-error", + NegotiationError::BadSignature => "bad-signature", } }; diff --git a/substrate/client/network/src/litep2p/service.rs b/substrate/client/network/src/litep2p/service.rs index fa1d47e5a1b7..d270e90efdf5 100644 --- a/substrate/client/network/src/litep2p/service.rs +++ b/substrate/client/network/src/litep2p/service.rs @@ -104,6 +104,15 @@ pub enum NetworkServiceCommand { expires: Option, }, + /// Start providing `key`. + StartProviding { key: KademliaKey }, + + /// Stop providing `key`. + StopProviding { key: KademliaKey }, + + /// Get providers for `key`. + GetProviders { key: KademliaKey }, + /// Query network status. Status { /// `oneshot::Sender` for sending the status. @@ -296,6 +305,18 @@ impl NetworkDHTProvider for Litep2pNetworkService { expires, }); } + + fn start_providing(&self, key: KademliaKey) { + let _ = self.cmd_tx.unbounded_send(NetworkServiceCommand::StartProviding { key }); + } + + fn stop_providing(&self, key: KademliaKey) { + let _ = self.cmd_tx.unbounded_send(NetworkServiceCommand::StopProviding { key }); + } + + fn get_providers(&self, key: KademliaKey) { + let _ = self.cmd_tx.unbounded_send(NetworkServiceCommand::GetProviders { key }); + } } #[async_trait::async_trait] diff --git a/substrate/client/network/src/litep2p/shim/request_response/mod.rs b/substrate/client/network/src/litep2p/shim/request_response/mod.rs index bfd7a60ef9fe..146f2e4add97 100644 --- a/substrate/client/network/src/litep2p/shim/request_response/mod.rs +++ b/substrate/client/network/src/litep2p/shim/request_response/mod.rs @@ -320,7 +320,7 @@ impl RequestResponseProtocol { &mut self, peer: litep2p::PeerId, request_id: RequestId, - fallback: Option, + _fallback: Option, response: Vec, ) { match self.pending_inbound_responses.remove(&request_id) { @@ -337,10 +337,7 @@ impl RequestResponseProtocol { response.len(), ); - let _ = tx.send(Ok(( - response, - fallback.map_or_else(|| self.protocol.clone(), Into::into), - ))); + let _ = tx.send(Ok((response, self.protocol.clone()))); self.metrics.register_outbound_request_success(started.elapsed()); }, } diff --git a/substrate/client/network/src/network_state.rs b/substrate/client/network/src/network_state.rs index cf8b8b55a7ff..65fd494739ee 100644 --- a/substrate/client/network/src/network_state.rs +++ b/substrate/client/network/src/network_state.rs @@ -106,7 +106,7 @@ pub enum Endpoint { impl From for PeerEndpoint { fn from(endpoint: ConnectedPoint) -> Self { match endpoint { - ConnectedPoint::Dialer { address, role_override } => + ConnectedPoint::Dialer { address, role_override, port_use: _ } => 
Self::Dialing(address, role_override.into()), ConnectedPoint::Listener { local_addr, send_back_addr } => Self::Listening { local_addr, send_back_addr }, diff --git a/substrate/client/network/src/peer_info.rs b/substrate/client/network/src/peer_info.rs index 21eeea6bcc0c..a673f06fd622 100644 --- a/substrate/client/network/src/peer_info.rs +++ b/substrate/client/network/src/peer_info.rs @@ -25,7 +25,7 @@ use either::Either; use fnv::FnvHashMap; use futures::prelude::*; use libp2p::{ - core::{ConnectedPoint, Endpoint}, + core::{transport::PortUse, ConnectedPoint, Endpoint}, identify::{ Behaviour as Identify, Config as IdentifyConfig, Event as IdentifyEvent, Info as IdentifyInfo, @@ -38,8 +38,8 @@ use libp2p::{ ExternalAddrConfirmed, FromSwarm, ListenFailure, }, ConnectionDenied, ConnectionHandler, ConnectionHandlerSelect, ConnectionId, - NetworkBehaviour, NewExternalAddrCandidate, PollParameters, THandler, THandlerInEvent, - THandlerOutEvent, ToSwarm, + NetworkBehaviour, NewExternalAddrCandidate, THandler, THandlerInEvent, THandlerOutEvent, + ToSwarm, }, Multiaddr, PeerId, }; @@ -275,23 +275,26 @@ impl NetworkBehaviour for PeerInfoBehaviour { peer: PeerId, addr: &Multiaddr, role_override: Endpoint, + port_use: PortUse, ) -> Result, ConnectionDenied> { let ping_handler = self.ping.handle_established_outbound_connection( connection_id, peer, addr, role_override, + port_use, )?; let identify_handler = self.identify.handle_established_outbound_connection( connection_id, peer, addr, role_override, + port_use, )?; Ok(ping_handler.select(identify_handler)) } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { match event { FromSwarm::ConnectionEstablished( e @ ConnectionEstablished { peer_id, endpoint, .. }, @@ -319,22 +322,21 @@ impl NetworkBehaviour for PeerInfoBehaviour { peer_id, connection_id, endpoint, - handler, + cause, remaining_established, }) => { - let (ping_handler, identity_handler) = handler.into_inner(); self.ping.on_swarm_event(FromSwarm::ConnectionClosed(ConnectionClosed { peer_id, connection_id, endpoint, - handler: ping_handler, + cause, remaining_established, })); self.identify.on_swarm_event(FromSwarm::ConnectionClosed(ConnectionClosed { peer_id, connection_id, endpoint, - handler: identity_handler, + cause, remaining_established, })); @@ -369,18 +371,21 @@ impl NetworkBehaviour for PeerInfoBehaviour { send_back_addr, error, connection_id, + peer_id, }) => { self.ping.on_swarm_event(FromSwarm::ListenFailure(ListenFailure { local_addr, send_back_addr, error, connection_id, + peer_id, })); self.identify.on_swarm_event(FromSwarm::ListenFailure(ListenFailure { local_addr, send_back_addr, error, connection_id, + peer_id, })); }, FromSwarm::ListenerError(e) => { @@ -438,6 +443,11 @@ impl NetworkBehaviour for PeerInfoBehaviour { self.ping.on_swarm_event(FromSwarm::NewListenAddr(e)); self.identify.on_swarm_event(FromSwarm::NewListenAddr(e)); }, + event => { + debug!(target: "sub-libp2p", "New unknown `FromSwarm` libp2p event: {event:?}"); + self.ping.on_swarm_event(event); + self.identify.on_swarm_event(event); + }, } } @@ -455,47 +465,29 @@ impl NetworkBehaviour for PeerInfoBehaviour { } } - fn poll( - &mut self, - cx: &mut Context, - params: &mut impl PollParameters, - ) -> Poll>> { + fn poll(&mut self, cx: &mut Context) -> Poll>> { if let Some(event) = self.pending_actions.pop_front() { return Poll::Ready(event) } loop { - match self.ping.poll(cx, params) { + match self.ping.poll(cx) { Poll::Pending => break, 
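// Only `GenerateEvent` is consumed in this loop; every other `ToSwarm` action is
// forwarded to the swarm unchanged by the catch-all arm below via `map_in`/`map_out`.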
Poll::Ready(ToSwarm::GenerateEvent(ev)) => { if let PingEvent { peer, result: Ok(rtt), connection } = ev { self.handle_ping_report(&peer, rtt, connection) } }, - Poll::Ready(ToSwarm::Dial { opts }) => return Poll::Ready(ToSwarm::Dial { opts }), - Poll::Ready(ToSwarm::NotifyHandler { peer_id, handler, event }) => - return Poll::Ready(ToSwarm::NotifyHandler { - peer_id, - handler, - event: Either::Left(event), - }), - Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }) => - return Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }), - Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)) => - return Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)), - Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)) => - return Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)), - Poll::Ready(ToSwarm::ExternalAddrExpired(addr)) => - return Poll::Ready(ToSwarm::ExternalAddrExpired(addr)), - Poll::Ready(ToSwarm::ListenOn { opts }) => - return Poll::Ready(ToSwarm::ListenOn { opts }), - Poll::Ready(ToSwarm::RemoveListener { id }) => - return Poll::Ready(ToSwarm::RemoveListener { id }), + Poll::Ready(event) => { + return Poll::Ready(event.map_in(Either::Left).map_out(|_| { + unreachable!("`GenerateEvent` is handled in a branch above; qed") + })); + }, } } loop { - match self.identify.poll(cx, params) { + match self.identify.poll(cx) { Poll::Pending => break, Poll::Ready(ToSwarm::GenerateEvent(event)) => match event { IdentifyEvent::Received { peer_id, info, .. } => { @@ -503,31 +495,20 @@ impl NetworkBehaviour for PeerInfoBehaviour { let event = PeerInfoEvent::Identified { peer_id, info }; return Poll::Ready(ToSwarm::GenerateEvent(event)) }, - IdentifyEvent::Error { peer_id, error } => { - debug!(target: "sub-libp2p", "Identification with peer {:?} failed => {}", peer_id, error) + IdentifyEvent::Error { connection_id, peer_id, error } => { + debug!( + target: "sub-libp2p", + "Identification with peer {peer_id:?}({connection_id}) failed => {error}" + ); }, IdentifyEvent::Pushed { .. } => {}, IdentifyEvent::Sent { .. 
} => {}, }, - Poll::Ready(ToSwarm::Dial { opts }) => return Poll::Ready(ToSwarm::Dial { opts }), - Poll::Ready(ToSwarm::NotifyHandler { peer_id, handler, event }) => - return Poll::Ready(ToSwarm::NotifyHandler { - peer_id, - handler, - event: Either::Right(event), - }), - Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }) => - return Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }), - Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)) => - return Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)), - Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)) => - return Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)), - Poll::Ready(ToSwarm::ExternalAddrExpired(addr)) => - return Poll::Ready(ToSwarm::ExternalAddrExpired(addr)), - Poll::Ready(ToSwarm::ListenOn { opts }) => - return Poll::Ready(ToSwarm::ListenOn { opts }), - Poll::Ready(ToSwarm::RemoveListener { id }) => - return Poll::Ready(ToSwarm::RemoveListener { id }), + Poll::Ready(event) => { + return Poll::Ready(event.map_in(Either::Right).map_out(|_| { + unreachable!("`GenerateEvent` is handled in a branch above; qed") + })); + }, } } diff --git a/substrate/client/network/src/protocol.rs b/substrate/client/network/src/protocol.rs index 402baa7bb2a4..6da1d601b34f 100644 --- a/substrate/client/network/src/protocol.rs +++ b/substrate/client/network/src/protocol.rs @@ -27,10 +27,10 @@ use crate::{ use codec::Encode; use libp2p::{ - core::Endpoint, + core::{transport::PortUse, Endpoint}, swarm::{ - behaviour::FromSwarm, ConnectionDenied, ConnectionId, NetworkBehaviour, PollParameters, - THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + behaviour::FromSwarm, ConnectionDenied, ConnectionId, NetworkBehaviour, THandler, + THandlerInEvent, THandlerOutEvent, ToSwarm, }, Multiaddr, PeerId, }; @@ -47,9 +47,7 @@ use notifications::{Notifications, NotificationsOut}; pub(crate) use notifications::ProtocolHandle; -pub use notifications::{ - notification_service, NotificationsSink, NotifsHandlerError, ProtocolHandlePair, Ready, -}; +pub use notifications::{notification_service, NotificationsSink, ProtocolHandlePair, Ready}; mod notifications; @@ -250,12 +248,14 @@ impl NetworkBehaviour for Protocol { peer: PeerId, addr: &Multiaddr, role_override: Endpoint, + port_use: PortUse, ) -> Result, ConnectionDenied> { self.behaviour.handle_established_outbound_connection( connection_id, peer, addr, role_override, + port_use, ) } @@ -271,7 +271,7 @@ impl NetworkBehaviour for Protocol { Ok(Vec::new()) } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { self.behaviour.on_swarm_event(event); } @@ -287,26 +287,15 @@ impl NetworkBehaviour for Protocol { fn poll( &mut self, cx: &mut std::task::Context, - params: &mut impl PollParameters, ) -> Poll>> { - let event = match self.behaviour.poll(cx, params) { + let event = match self.behaviour.poll(cx) { Poll::Pending => return Poll::Pending, Poll::Ready(ToSwarm::GenerateEvent(ev)) => ev, - Poll::Ready(ToSwarm::Dial { opts }) => return Poll::Ready(ToSwarm::Dial { opts }), - Poll::Ready(ToSwarm::NotifyHandler { peer_id, handler, event }) => - return Poll::Ready(ToSwarm::NotifyHandler { peer_id, handler, event }), - Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }) => - return Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }), - Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)) => - return Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)), - Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)) => - return 
Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)), - Poll::Ready(ToSwarm::ExternalAddrExpired(addr)) => - return Poll::Ready(ToSwarm::ExternalAddrExpired(addr)), - Poll::Ready(ToSwarm::ListenOn { opts }) => - return Poll::Ready(ToSwarm::ListenOn { opts }), - Poll::Ready(ToSwarm::RemoveListener { id }) => - return Poll::Ready(ToSwarm::RemoveListener { id }), + Poll::Ready(event) => { + return Poll::Ready(event.map_out(|_| { + unreachable!("`GenerateEvent` is handled in a branch above; qed") + })); + }, }; let outcome = match event { diff --git a/substrate/client/network/src/protocol/notifications.rs b/substrate/client/network/src/protocol/notifications.rs index 10fa329097d1..2691496234ad 100644 --- a/substrate/client/network/src/protocol/notifications.rs +++ b/substrate/client/network/src/protocol/notifications.rs @@ -21,7 +21,7 @@ pub use self::{ behaviour::{Notifications, NotificationsOut, ProtocolConfig}, - handler::{NotificationsSink, NotifsHandlerError, Ready}, + handler::{NotificationsSink, Ready}, service::{notification_service, ProtocolHandlePair}, }; diff --git a/substrate/client/network/src/protocol/notifications/behaviour.rs b/substrate/client/network/src/protocol/notifications/behaviour.rs index a562546145c8..e6909fcdefea 100644 --- a/substrate/client/network/src/protocol/notifications/behaviour.rs +++ b/substrate/client/network/src/protocol/notifications/behaviour.rs @@ -33,11 +33,11 @@ use bytes::BytesMut; use fnv::FnvHashMap; use futures::{future::BoxFuture, prelude::*, stream::FuturesUnordered}; use libp2p::{ - core::{Endpoint, Multiaddr}, + core::{transport::PortUse, Endpoint, Multiaddr}, swarm::{ behaviour::{ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm}, - ConnectionDenied, ConnectionId, DialError, NetworkBehaviour, NotifyHandler, PollParameters, - THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + ConnectionDenied, ConnectionId, DialError, NetworkBehaviour, NotifyHandler, THandler, + THandlerInEvent, THandlerOutEvent, ToSwarm, }, PeerId, }; @@ -49,6 +49,7 @@ use smallvec::SmallVec; use tokio::sync::oneshot::error::RecvError; use tokio_stream::StreamMap; +use libp2p::swarm::CloseConnection; use std::{ cmp, collections::{hash_map::Entry, VecDeque}, @@ -1233,11 +1234,12 @@ impl NetworkBehaviour for Notifications { peer: PeerId, _addr: &Multiaddr, _role_override: Endpoint, + _port_use: PortUse, ) -> Result, ConnectionDenied> { Ok(NotifsHandler::new(peer, self.notif_protocols.clone(), Some(self.metrics.clone()))) } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { match event { FromSwarm::ConnectionEstablished(ConnectionEstablished { peer_id, @@ -1670,6 +1672,9 @@ impl NetworkBehaviour for Notifications { FromSwarm::ExternalAddrConfirmed(_) => {}, FromSwarm::AddressChange(_) => {}, FromSwarm::NewListenAddr(_) => {}, + event => { + warn!(target: "sub-libp2p", "New unknown `FromSwarm` libp2p event: {event:?}"); + }, } } @@ -2217,14 +2222,19 @@ impl NetworkBehaviour for Notifications { ); } }, + NotifsHandlerOut::Close { protocol_index } => { + let set_id = SetId::from(protocol_index); + + trace!(target: "sub-libp2p", "Handler({}, {:?}) => SyncNotificationsClogged({:?})", peer_id, connection_id, set_id); + self.events.push_back(ToSwarm::CloseConnection { + peer_id, + connection: CloseConnection::One(connection_id), + }); + }, } } - fn poll( - &mut self, - cx: &mut Context, - _params: &mut impl PollParameters, - ) -> Poll>> { + fn poll(&mut self, cx: &mut Context) -> Poll>> { if let Some(event) = 
self.events.pop_front() { return Poll::Ready(event) } @@ -2359,7 +2369,6 @@ impl NetworkBehaviour for Notifications { } #[cfg(test)] -#[allow(deprecated)] mod tests { use super::*; use crate::{ @@ -2386,17 +2395,6 @@ mod tests { } } - #[derive(Clone)] - struct MockPollParams {} - - impl PollParameters for MockPollParams { - type SupportedProtocolsIter = std::vec::IntoIter>; - - fn supported_protocols(&self) -> Self::SupportedProtocolsIter { - vec![].into_iter() - } - } - fn development_notifs( ) -> (Notifications, ProtocolController, Box) { @@ -2654,7 +2652,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -2854,7 +2852,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -3007,7 +3005,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -3051,7 +3049,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -3121,7 +3119,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -3269,7 +3267,7 @@ mod tests { peer_id: peer, connection_id: conn1, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -3395,7 +3393,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -3469,7 +3467,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -3532,7 +3530,7 @@ mod tests { peer_id: peer, connection_id: conn1, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -3546,7 +3544,7 @@ mod tests { peer_id: peer, connection_id: conn2, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -3600,7 +3598,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -3658,7 +3656,7 @@ mod tests { peer_id: peer, connection_id: conn2, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -3719,7 +3717,7 @@ mod tests { peer_id: peer, connection_id: conn1, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -3788,7 +3786,7 @@ mod tests { peer_id: peer, connection_id: conn1, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -3829,7 +3827,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, vec![], 
None), + cause: None, remaining_established: 0usize, }, )); @@ -3952,7 +3950,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -3972,11 +3970,9 @@ mod tests { assert!(notif.peers.get(&(peer, set_id)).is_some()); if tokio::time::timeout(Duration::from_secs(5), async { - let mut params = MockPollParams {}; - loop { futures::future::poll_fn(|cx| { - let _ = notif.poll(cx, &mut params); + let _ = notif.poll(cx); Poll::Ready(()) }) .await; @@ -4080,11 +4076,9 @@ mod tests { // verify that the code continues to keep the peer disabled by resetting the timer // after the first one expired. if tokio::time::timeout(Duration::from_secs(5), async { - let mut params = MockPollParams {}; - loop { futures::future::poll_fn(|cx| { - let _ = notif.poll(cx, &mut params); + let _ = notif.poll(cx); Poll::Ready(()) }) .await; @@ -4262,7 +4256,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -4503,7 +4497,7 @@ mod tests { peer_id: peer, connection_id: ConnectionId::new_unchecked(0), endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -4605,7 +4599,7 @@ mod tests { peer_id: peer, connection_id: ConnectionId::new_unchecked(0), endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -4687,7 +4681,7 @@ mod tests { peer_id: peer, connection_id: ConnectionId::new_unchecked(0), endpoint: &endpoint.clone(), - handler: NotifsHandler::new(peer, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -4804,7 +4798,7 @@ mod tests { peer_id: peer, connection_id: ConnectionId::new_unchecked(1337), endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -4839,7 +4833,7 @@ mod tests { peer_id: peer, connection_id: ConnectionId::new_unchecked(1337), endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -4890,7 +4884,7 @@ mod tests { peer_id: peer, connection_id: ConnectionId::new_unchecked(1337), endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -4937,7 +4931,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -4987,7 +4981,7 @@ mod tests { peer_id: peer, connection_id: ConnectionId::new_unchecked(1337), endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -5030,7 +5024,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -5041,7 +5035,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, vec![], None), + cause: None, remaining_established: 0usize, }, )); diff --git a/substrate/client/network/src/protocol/notifications/handler.rs b/substrate/client/network/src/protocol/notifications/handler.rs index 
bff60ba1125f..332de9f19c41 100644 --- a/substrate/client/network/src/protocol/notifications/handler.rs +++ b/substrate/client/network/src/protocol/notifications/handler.rs @@ -74,12 +74,12 @@ use futures::{ }; use libp2p::{ swarm::{ - handler::ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, KeepAlive, Stream, + handler::ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, Stream, SubstreamProtocol, }, PeerId, }; -use log::error; +use log::{error, warn}; use parking_lot::{Mutex, RwLock}; use std::{ collections::VecDeque, @@ -87,7 +87,7 @@ use std::{ pin::Pin, sync::Arc, task::{Context, Poll}, - time::{Duration, Instant}, + time::Duration, }; /// Number of pending notifications in asynchronous contexts. @@ -113,16 +113,18 @@ pub struct NotifsHandler { /// List of notification protocols, specified by the user at initialization. protocols: Vec, - /// When the connection with the remote has been successfully established. - when_connection_open: Instant, + /// Whether to keep connection alive + keep_alive: bool, + + /// Optional future that keeps connection alive for a certain amount of time. + // TODO: this should be safe to remove, see https://github.com/paritytech/polkadot-sdk/issues/6350 + keep_alive_timeout_future: Option + Send + 'static>>>, /// Remote we are connected to. peer_id: PeerId, /// Events to return in priority from `poll`. - events_queue: VecDeque< - ConnectionHandlerEvent, - >, + events_queue: VecDeque>, /// Metrics. metrics: Option>, @@ -149,7 +151,12 @@ impl NotifsHandler { }) .collect(), peer_id, - when_connection_open: Instant::now(), + // Keep connection alive initially until below timeout expires + keep_alive: true, + // A grace period of `INITIAL_KEEPALIVE_TIME` must be given to leave time for the remote + // to express desire to open substreams. + // TODO: This is a hack and ideally should not be necessary + keep_alive_timeout_future: Some(Box::pin(tokio::time::sleep(INITIAL_KEEPALIVE_TIME))), events_queue: VecDeque::with_capacity(16), metrics: metrics.map_or(None, |metrics| Some(Arc::new(metrics))), } @@ -327,6 +334,12 @@ pub enum NotifsHandlerOut { /// Message that has been received. message: BytesMut, }, + + /// Close connection + Close { + /// Index of the protocol in the list of protocols passed at initialization. + protocol_index: usize, + }, } /// Sink connected directly to the node background task. Allows sending notifications to the peer. @@ -465,17 +478,9 @@ impl<'a> Ready<'a> { } } -/// Error specific to the collection of protocols. -#[derive(Debug, thiserror::Error)] -pub enum NotifsHandlerError { - #[error("Channel of synchronous notifications is full.")] - SyncNotificationsClogged, -} - impl ConnectionHandler for NotifsHandler { type FromBehaviour = NotifsHandlerIn; type ToBehaviour = NotifsHandlerOut; - type Error = NotifsHandlerError; type InboundProtocol = UpgradeCollec; type OutboundProtocol = NotificationsOut; // Index within the `out_protocols`. @@ -616,6 +621,9 @@ impl ConnectionHandler for NotifsHandler { State::Open { .. } => debug_assert!(false), }, ConnectionEvent::ListenUpgradeError(_listen_upgrade_error) => {}, + event => { + warn!(target: "sub-libp2p", "New unknown `ConnectionEvent` libp2p event: {event:?}"); + }, } } @@ -711,35 +719,36 @@ impl ConnectionHandler for NotifsHandler { } } - fn connection_keep_alive(&self) -> KeepAlive { + fn connection_keep_alive(&self) -> bool { // `Yes` if any protocol has some activity. if self.protocols.iter().any(|p| !matches!(p.state, State::Closed { .. 
})) { - return KeepAlive::Yes + return true; } - // A grace period of `INITIAL_KEEPALIVE_TIME` must be given to leave time for the remote - // to express desire to open substreams. - #[allow(deprecated)] - KeepAlive::Until(self.when_connection_open + INITIAL_KEEPALIVE_TIME) + self.keep_alive } - #[allow(deprecated)] fn poll( &mut self, cx: &mut Context, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, + ConnectionHandlerEvent, > { + { + let maybe_keep_alive_timeout_future = &mut self.keep_alive_timeout_future; + if let Some(keep_alive_timeout_future) = maybe_keep_alive_timeout_future { + if keep_alive_timeout_future.poll_unpin(cx).is_ready() { + maybe_keep_alive_timeout_future.take(); + self.keep_alive = false; + } + } + } + if let Some(ev) = self.events_queue.pop_front() { return Poll::Ready(ev) } - // For each open substream, try send messages from `notifications_sink_rx` to the + // For each open substream, try to send messages from `notifications_sink_rx` to the // substream. for protocol_index in 0..self.protocols.len() { if let State::Open { @@ -750,11 +759,10 @@ impl ConnectionHandler for NotifsHandler { // Only proceed with `out_substream.poll_ready_unpin` if there is an element // available in `notifications_sink_rx`. This avoids waking up the task when // a substream is ready to send if there isn't actually something to send. - #[allow(deprecated)] match Pin::new(&mut *notifications_sink_rx).as_mut().poll_peek(cx) { Poll::Ready(Some(&NotificationsSinkMessage::ForceClose)) => - return Poll::Ready(ConnectionHandlerEvent::Close( - NotifsHandlerError::SyncNotificationsClogged, + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + NotifsHandlerOut::Close { protocol_index }, )), Poll::Ready(Some(&NotificationsSinkMessage::Notification { .. })) => {}, Poll::Ready(None) | Poll::Pending => break, @@ -975,6 +983,17 @@ pub mod tests { rx_buffer: BytesMut, } + /// Mirror of `ActiveStreamCounter` in `libp2p` + #[allow(dead_code)] + struct MockActiveStreamCounter(Arc<()>); + + // Mirror of `Stream` in `libp2p` + #[allow(dead_code)] + struct MockStream { + stream: Negotiated, + counter: Option, + } + impl MockSubstream { /// Create new substream pair. pub fn new() -> (Self, Self) { @@ -1004,16 +1023,11 @@ pub mod tests { /// Unsafe substitute for `Stream::new` private constructor. fn stream_new(stream: Negotiated) -> Stream { + let stream = MockStream { stream, counter: None }; // Static asserts to make sure this doesn't break. const _: () = { - assert!( - core::mem::size_of::() == - core::mem::size_of::>() - ); - assert!( - core::mem::align_of::() == - core::mem::align_of::>() - ); + assert!(core::mem::size_of::() == core::mem::size_of::()); + assert!(core::mem::align_of::() == core::mem::align_of::()); }; unsafe { core::mem::transmute(stream) } @@ -1084,24 +1098,16 @@ pub mod tests { /// Create new [`NotifsHandler`]. 
fn notifs_handler() -> NotifsHandler { - let proto = Protocol { - config: ProtocolConfig { + NotifsHandler::new( + PeerId::random(), + vec![ProtocolConfig { name: "/foo".into(), fallback_names: vec![], handshake: Arc::new(RwLock::new(b"hello, world".to_vec())), max_notification_size: u64::MAX, - }, - in_upgrade: NotificationsIn::new("/foo", Vec::new(), u64::MAX), - state: State::Closed { pending_opening: false }, - }; - - NotifsHandler { - protocols: vec![proto], - when_connection_open: Instant::now(), - peer_id: PeerId::random(), - events_queue: VecDeque::new(), - metrics: None, - } + }], + None, + ) } // verify that if another substream is attempted to be opened by remote while an inbound @@ -1608,12 +1614,11 @@ pub mod tests { notifications_sink.send_sync_notification(vec![1, 3, 3, 9]); notifications_sink.send_sync_notification(vec![1, 3, 4, 0]); - #[allow(deprecated)] futures::future::poll_fn(|cx| { assert!(std::matches!( handler.poll(cx), - Poll::Ready(ConnectionHandlerEvent::Close( - NotifsHandlerError::SyncNotificationsClogged, + Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + NotifsHandlerOut::Close { .. } )) )); Poll::Ready(()) diff --git a/substrate/client/network/src/protocol/notifications/tests.rs b/substrate/client/network/src/protocol/notifications/tests.rs index a8eeb2bb1980..50f03b5911b6 100644 --- a/substrate/client/network/src/protocol/notifications/tests.rs +++ b/substrate/client/network/src/protocol/notifications/tests.rs @@ -30,30 +30,25 @@ use crate::{ use futures::{future::BoxFuture, prelude::*}; use libp2p::{ - core::{transport::MemoryTransport, upgrade, Endpoint}, + core::{ + transport::{MemoryTransport, PortUse}, + upgrade, Endpoint, + }, identity, noise, swarm::{ - self, behaviour::FromSwarm, ConnectionDenied, ConnectionId, Executor, NetworkBehaviour, - PollParameters, Swarm, SwarmEvent, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + behaviour::FromSwarm, ConnectionDenied, ConnectionId, NetworkBehaviour, Swarm, SwarmEvent, + THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }, - yamux, Multiaddr, PeerId, Transport, + yamux, Multiaddr, PeerId, SwarmBuilder, Transport, }; use sc_utils::mpsc::tracing_unbounded; use std::{ iter, - pin::Pin, sync::Arc, task::{Context, Poll}, time::Duration, }; -struct TokioExecutor(tokio::runtime::Runtime); -impl Executor for TokioExecutor { - fn exec(&self, f: Pin + Send>>) { - let _ = self.0.spawn(f); - } -} - /// Builds two nodes that have each other as bootstrap nodes. /// This is to be used only for testing, and a panic will happen if something goes wrong. fn build_nodes() -> (Swarm, Swarm) { @@ -67,13 +62,6 @@ fn build_nodes() -> (Swarm, Swarm) { for index in 0..2 { let keypair = keypairs[index].clone(); - let transport = MemoryTransport::new() - .upgrade(upgrade::Version::V1) - .authenticate(noise::Config::new(&keypair).unwrap()) - .multiplex(yamux::Config::default()) - .timeout(Duration::from_secs(20)) - .boxed(); - let (protocol_handle_pair, mut notif_service) = crate::protocol::notifications::service::notification_service("/foo".into()); // The first swarm has the second peer ID present in the peerstore. 
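The keep-alive rework in `handler.rs` above drops the deprecated `KeepAlive::Until` in favour of a plain flag plus a boxed sleep polled from `poll()`. A minimal sketch of that pattern with a simplified handler, not the actual `NotifsHandler` (the field type is reconstructed from the `+ Send + 'static` remnant above, and the grace-period value is illustrative):

use futures::FutureExt;
use std::{
    future::Future,
    pin::Pin,
    task::{Context, Poll},
    time::Duration,
};

const INITIAL_KEEPALIVE_TIME: Duration = Duration::from_secs(5);

struct Handler {
    keep_alive: bool,
    keep_alive_timeout_future: Option<Pin<Box<dyn Future<Output = ()> + Send + 'static>>>,
}

impl Handler {
    fn new() -> Self {
        Self {
            // Keep the connection alive until the grace period elapses.
            keep_alive: true,
            keep_alive_timeout_future: Some(Box::pin(tokio::time::sleep(INITIAL_KEEPALIVE_TIME))),
        }
    }

    fn connection_keep_alive(&self) -> bool {
        self.keep_alive
    }

    fn poll(&mut self, cx: &mut Context<'_>) -> Poll<()> {
        // Once the sleep completes, drop it and stop asking the swarm to
        // keep an otherwise idle connection open.
        if let Some(fut) = self.keep_alive_timeout_future.as_mut() {
            if fut.poll_unpin(cx).is_ready() {
                self.keep_alive_timeout_future = None;
                self.keep_alive = false;
            }
        }
        Poll::Pending
    }
}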
@@ -102,39 +90,8 @@ fn build_nodes() -> (Swarm, Swarm) { ); let (notif_handle, command_stream) = protocol_handle_pair.split(); - let behaviour = CustomProtoWithAddr { - inner: Notifications::new( - vec![controller_handle], - from_controller, - NotificationMetrics::new(None), - iter::once(( - ProtocolConfig { - name: "/foo".into(), - fallback_names: Vec::new(), - handshake: Vec::new(), - max_notification_size: 1024 * 1024, - }, - notif_handle, - command_stream, - )), - ), - peer_store_future: peer_store.run().boxed(), - protocol_controller_future: controller.run().boxed(), - addrs: addrs - .iter() - .enumerate() - .filter_map(|(n, a)| { - if n != index { - Some((keypairs[n].public().to_peer_id(), a.clone())) - } else { - None - } - }) - .collect(), - }; - let runtime = tokio::runtime::Runtime::new().unwrap(); - runtime.spawn(async move { + tokio::spawn(async move { loop { if let NotificationEvent::ValidateInboundSubstream { result_tx, .. } = notif_service.next_event().await.unwrap() @@ -144,12 +101,49 @@ fn build_nodes() -> (Swarm, Swarm) { } }); - let mut swarm = Swarm::new( - transport, - behaviour, - keypairs[index].public().to_peer_id(), - swarm::Config::with_executor(TokioExecutor(runtime)), - ); + let mut swarm = SwarmBuilder::with_existing_identity(keypair) + .with_tokio() + .with_other_transport(|keypair| { + MemoryTransport::new() + .upgrade(upgrade::Version::V1) + .authenticate(noise::Config::new(&keypair).unwrap()) + .multiplex(yamux::Config::default()) + .timeout(Duration::from_secs(20)) + .boxed() + }) + .unwrap() + .with_behaviour(|_keypair| CustomProtoWithAddr { + inner: Notifications::new( + vec![controller_handle], + from_controller, + NotificationMetrics::new(None), + iter::once(( + ProtocolConfig { + name: "/foo".into(), + fallback_names: Vec::new(), + handshake: Vec::new(), + max_notification_size: 1024 * 1024, + }, + notif_handle, + command_stream, + )), + ), + peer_store_future: peer_store.run().boxed(), + protocol_controller_future: controller.run().boxed(), + addrs: addrs + .iter() + .enumerate() + .filter_map(|(n, a)| { + if n != index { + Some((keypairs[n].public().to_peer_id(), a.clone())) + } else { + None + } + }) + .collect(), + }) + .unwrap() + .build(); swarm.listen_on(addrs[index].clone()).unwrap(); out.push(swarm); } @@ -241,12 +235,18 @@ impl NetworkBehaviour for CustomProtoWithAddr { peer: PeerId, addr: &Multiaddr, role_override: Endpoint, + port_use: PortUse, ) -> Result, ConnectionDenied> { - self.inner - .handle_established_outbound_connection(connection_id, peer, addr, role_override) + self.inner.handle_established_outbound_connection( + connection_id, + peer, + addr, + role_override, + port_use, + ) } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { self.inner.on_swarm_event(event); } @@ -259,19 +259,15 @@ impl NetworkBehaviour for CustomProtoWithAddr { self.inner.on_connection_handler_event(peer_id, connection_id, event); } - fn poll( - &mut self, - cx: &mut Context, - params: &mut impl PollParameters, - ) -> Poll>> { + fn poll(&mut self, cx: &mut Context) -> Poll>> { let _ = self.peer_store_future.poll_unpin(cx); let _ = self.protocol_controller_future.poll_unpin(cx); - self.inner.poll(cx, params) + self.inner.poll(cx) } } -#[test] -fn reconnect_after_disconnect() { +#[tokio::test] +async fn reconnect_after_disconnect() { // We connect two nodes together, then force a disconnect (through the API of the `Service`), // check that the disconnect worked, and finally check whether they successfully 
reconnect. @@ -288,108 +284,106 @@ fn reconnect_after_disconnect() { let mut service1_state = ServiceState::NotConnected; let mut service2_state = ServiceState::NotConnected; - futures::executor::block_on(async move { - loop { - // Grab next event from services. - let event = { - let s1 = service1.select_next_some(); - let s2 = service2.select_next_some(); - futures::pin_mut!(s1, s2); - match future::select(s1, s2).await { - future::Either::Left((ev, _)) => future::Either::Left(ev), - future::Either::Right((ev, _)) => future::Either::Right(ev), - } - }; - - match event { - future::Either::Left(SwarmEvent::Behaviour( - NotificationsOut::CustomProtocolOpen { .. }, - )) => match service1_state { - ServiceState::NotConnected => { - service1_state = ServiceState::FirstConnec; - if service2_state == ServiceState::FirstConnec { - service1 - .behaviour_mut() - .disconnect_peer(Swarm::local_peer_id(&service2), SetId::from(0)); - } - }, - ServiceState::Disconnected => service1_state = ServiceState::ConnectedAgain, - ServiceState::FirstConnec | ServiceState::ConnectedAgain => panic!(), - }, - future::Either::Left(SwarmEvent::Behaviour( - NotificationsOut::CustomProtocolClosed { .. }, - )) => match service1_state { - ServiceState::FirstConnec => service1_state = ServiceState::Disconnected, - ServiceState::ConnectedAgain | - ServiceState::NotConnected | - ServiceState::Disconnected => panic!(), - }, - future::Either::Right(SwarmEvent::Behaviour( - NotificationsOut::CustomProtocolOpen { .. }, - )) => match service2_state { - ServiceState::NotConnected => { - service2_state = ServiceState::FirstConnec; - if service1_state == ServiceState::FirstConnec { - service1 - .behaviour_mut() - .disconnect_peer(Swarm::local_peer_id(&service2), SetId::from(0)); - } - }, - ServiceState::Disconnected => service2_state = ServiceState::ConnectedAgain, - ServiceState::FirstConnec | ServiceState::ConnectedAgain => panic!(), - }, - future::Either::Right(SwarmEvent::Behaviour( - NotificationsOut::CustomProtocolClosed { .. }, - )) => match service2_state { - ServiceState::FirstConnec => service2_state = ServiceState::Disconnected, - ServiceState::ConnectedAgain | - ServiceState::NotConnected | - ServiceState::Disconnected => panic!(), - }, - _ => {}, + loop { + // Grab next event from services. + let event = { + let s1 = service1.select_next_some(); + let s2 = service2.select_next_some(); + futures::pin_mut!(s1, s2); + match future::select(s1, s2).await { + future::Either::Left((ev, _)) => future::Either::Left(ev), + future::Either::Right((ev, _)) => future::Either::Right(ev), } + }; - // Due to the bug in `Notifications`, the disconnected node does not always detect that - // it was disconnected. The closed inbound substream is tolerated by design, and the - // closed outbound substream is not detected until something is sent into it. - // See [PR #13396](https://github.com/paritytech/substrate/pull/13396). - // This happens if the disconnecting node reconnects to it fast enough. - // In this case the disconnected node does not transit via `ServiceState::NotConnected` - // and stays in `ServiceState::FirstConnec`. - // TODO: update this once the fix is finally merged. 
- if service1_state == ServiceState::ConnectedAgain && - service2_state == ServiceState::ConnectedAgain || - service1_state == ServiceState::ConnectedAgain && - service2_state == ServiceState::FirstConnec || - service1_state == ServiceState::FirstConnec && - service2_state == ServiceState::ConnectedAgain - { - break - } + match event { + future::Either::Left(SwarmEvent::Behaviour(NotificationsOut::CustomProtocolOpen { + .. + })) => match service1_state { + ServiceState::NotConnected => { + service1_state = ServiceState::FirstConnec; + if service2_state == ServiceState::FirstConnec { + service1 + .behaviour_mut() + .disconnect_peer(Swarm::local_peer_id(&service2), SetId::from(0)); + } + }, + ServiceState::Disconnected => service1_state = ServiceState::ConnectedAgain, + ServiceState::FirstConnec | ServiceState::ConnectedAgain => panic!(), + }, + future::Either::Left(SwarmEvent::Behaviour( + NotificationsOut::CustomProtocolClosed { .. }, + )) => match service1_state { + ServiceState::FirstConnec => service1_state = ServiceState::Disconnected, + ServiceState::ConnectedAgain | + ServiceState::NotConnected | + ServiceState::Disconnected => panic!(), + }, + future::Either::Right(SwarmEvent::Behaviour( + NotificationsOut::CustomProtocolOpen { .. }, + )) => match service2_state { + ServiceState::NotConnected => { + service2_state = ServiceState::FirstConnec; + if service1_state == ServiceState::FirstConnec { + service1 + .behaviour_mut() + .disconnect_peer(Swarm::local_peer_id(&service2), SetId::from(0)); + } + }, + ServiceState::Disconnected => service2_state = ServiceState::ConnectedAgain, + ServiceState::FirstConnec | ServiceState::ConnectedAgain => panic!(), + }, + future::Either::Right(SwarmEvent::Behaviour( + NotificationsOut::CustomProtocolClosed { .. }, + )) => match service2_state { + ServiceState::FirstConnec => service2_state = ServiceState::Disconnected, + ServiceState::ConnectedAgain | + ServiceState::NotConnected | + ServiceState::Disconnected => panic!(), + }, + _ => {}, } - // Now that the two services have disconnected and reconnected, wait for 3 seconds and - // check whether they're still connected. - let mut delay = futures_timer::Delay::new(Duration::from_secs(3)); - - loop { - // Grab next event from services. - let event = { - let s1 = service1.select_next_some(); - let s2 = service2.select_next_some(); - futures::pin_mut!(s1, s2); - match future::select(future::select(s1, s2), &mut delay).await { - future::Either::Right(_) => break, // success - future::Either::Left((future::Either::Left((ev, _)), _)) => ev, - future::Either::Left((future::Either::Right((ev, _)), _)) => ev, - } - }; + // Due to the bug in `Notifications`, the disconnected node does not always detect that + // it was disconnected. The closed inbound substream is tolerated by design, and the + // closed outbound substream is not detected until something is sent into it. + // See [PR #13396](https://github.com/paritytech/substrate/pull/13396). + // This happens if the disconnecting node reconnects to it fast enough. + // In this case the disconnected node does not transit via `ServiceState::NotConnected` + // and stays in `ServiceState::FirstConnec`. + // TODO: update this once the fix is finally merged. 
+ if service1_state == ServiceState::ConnectedAgain && + service2_state == ServiceState::ConnectedAgain || + service1_state == ServiceState::ConnectedAgain && + service2_state == ServiceState::FirstConnec || + service1_state == ServiceState::FirstConnec && + service2_state == ServiceState::ConnectedAgain + { + break + } + } - match event { - SwarmEvent::Behaviour(NotificationsOut::CustomProtocolOpen { .. }) | - SwarmEvent::Behaviour(NotificationsOut::CustomProtocolClosed { .. }) => panic!(), - _ => {}, + // Now that the two services have disconnected and reconnected, wait for 3 seconds and + // check whether they're still connected. + let mut delay = futures_timer::Delay::new(Duration::from_secs(3)); + + loop { + // Grab next event from services. + let event = { + let s1 = service1.select_next_some(); + let s2 = service2.select_next_some(); + futures::pin_mut!(s1, s2); + match future::select(future::select(s1, s2), &mut delay).await { + future::Either::Right(_) => break, // success + future::Either::Left((future::Either::Left((ev, _)), _)) => ev, + future::Either::Left((future::Either::Right((ev, _)), _)) => ev, } + }; + + match event { + SwarmEvent::Behaviour(NotificationsOut::CustomProtocolOpen { .. }) | + SwarmEvent::Behaviour(NotificationsOut::CustomProtocolClosed { .. }) => panic!(), + _ => {}, } - }); + } } diff --git a/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs b/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs index e01bcbe0bad7..9e8a03fc07c9 100644 --- a/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs +++ b/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs @@ -39,12 +39,12 @@ use crate::types::ProtocolName; use asynchronous_codec::Framed; use bytes::BytesMut; use futures::prelude::*; -use libp2p::core::{upgrade, InboundUpgrade, OutboundUpgrade, UpgradeInfo}; +use libp2p::core::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use log::{error, warn}; use unsigned_varint::codec::UviBytes; use std::{ - io, mem, + fmt, io, mem, pin::Pin, task::{Context, Poll}, vec, @@ -187,6 +187,14 @@ pub struct NotificationsInOpen { pub substream: NotificationsInSubstream, } +impl fmt::Debug for NotificationsInOpen { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("NotificationsInOpen") + .field("handshake", &self.handshake) + .finish_non_exhaustive() + } +} + impl NotificationsInSubstream where TSubstream: AsyncRead + AsyncWrite + Unpin, @@ -370,7 +378,14 @@ where fn upgrade_outbound(self, mut socket: TSubstream, negotiated_name: Self::Info) -> Self::Future { Box::pin(async move { - upgrade::write_length_prefixed(&mut socket, &self.initial_message).await?; + { + let mut len_data = unsigned_varint::encode::usize_buffer(); + let encoded_len = + unsigned_varint::encode::usize(self.initial_message.len(), &mut len_data).len(); + socket.write_all(&len_data[..encoded_len]).await?; + } + socket.write_all(&self.initial_message).await?; + socket.flush().await?; // Reading handshake. 
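// The varint write above inlines what `upgrade::write_length_prefixed` used
// to do before the helper was removed from libp2p. A standalone sketch of
// the same logic as a hypothetical free function (not part of the actual
// upgrade code):
async fn write_length_prefixed<S>(socket: &mut S, payload: &[u8]) -> std::io::Result<()>
where
    S: futures::AsyncWrite + Unpin,
{
    use futures::AsyncWriteExt;

    // Varint-encode the payload length into a stack buffer...
    let mut len_data = unsigned_varint::encode::usize_buffer();
    let encoded_len = unsigned_varint::encode::usize(payload.len(), &mut len_data).len();
    // ...then write the prefix and the payload, and flush the stream.
    socket.write_all(&len_data[..encoded_len]).await?;
    socket.write_all(payload).await?;
    socket.flush().await
}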
let handshake_len = unsigned_varint::aio::read_usize(&mut socket).await?; @@ -413,6 +428,15 @@ pub struct NotificationsOutOpen { pub substream: NotificationsOutSubstream, } +impl fmt::Debug for NotificationsOutOpen { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("NotificationsOutOpen") + .field("handshake", &self.handshake) + .field("negotiated_fallback", &self.negotiated_fallback) + .finish_non_exhaustive() + } +} + impl Sink> for NotificationsOutSubstream where TSubstream: AsyncRead + AsyncWrite + Unpin, diff --git a/substrate/client/network/src/protocol_controller.rs b/substrate/client/network/src/protocol_controller.rs index af7adb50907f..11f5321294d0 100644 --- a/substrate/client/network/src/protocol_controller.rs +++ b/substrate/client/network/src/protocol_controller.rs @@ -464,7 +464,7 @@ impl ProtocolController { /// maintain connections with such peers. fn on_add_reserved_peer(&mut self, peer_id: PeerId) { if self.reserved_nodes.contains_key(&peer_id) { - warn!( + debug!( target: LOG_TARGET, "Trying to add an already reserved node {peer_id} as reserved on {:?}.", self.set_id, diff --git a/substrate/client/network/src/request_responses.rs b/substrate/client/network/src/request_responses.rs index 6c2631924df4..5fe34c781378 100644 --- a/substrate/client/network/src/request_responses.rs +++ b/substrate/client/network/src/request_responses.rs @@ -43,13 +43,11 @@ use crate::{ use futures::{channel::oneshot, prelude::*}; use libp2p::{ - core::{Endpoint, Multiaddr}, + core::{transport::PortUse, Endpoint, Multiaddr}, request_response::{self, Behaviour, Codec, Message, ProtocolSupport, ResponseChannel}, swarm::{ - behaviour::{ConnectionClosed, FromSwarm}, - handler::multi::MultiHandler, - ConnectionDenied, ConnectionId, NetworkBehaviour, PollParameters, THandler, - THandlerInEvent, THandlerOutEvent, ToSwarm, + behaviour::FromSwarm, handler::multi::MultiHandler, ConnectionDenied, ConnectionId, + NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }, PeerId, }; @@ -64,11 +62,11 @@ use std::{ time::{Duration, Instant}, }; -pub use libp2p::request_response::{Config, RequestId}; +pub use libp2p::request_response::{Config, InboundRequestId, OutboundRequestId}; /// Possible failures occurring in the context of sending an outbound request and receiving the /// response. -#[derive(Debug, thiserror::Error)] +#[derive(Debug, Clone, thiserror::Error)] pub enum OutboundFailure { /// The request could not be sent because a dialing attempt failed. #[error("Failed to dial the requested peer")] @@ -82,6 +80,9 @@ pub enum OutboundFailure { /// The remote supports none of the requested protocols. #[error("The remote supports none of the requested protocols")] UnsupportedProtocols, + /// An IO failure happened on an outbound stream. + #[error("An IO failure happened on an outbound stream")] + Io(Arc), } impl From for OutboundFailure { @@ -93,6 +94,7 @@ impl From for OutboundFailure { OutboundFailure::ConnectionClosed, request_response::OutboundFailure::UnsupportedProtocols => OutboundFailure::UnsupportedProtocols, + request_response::OutboundFailure::Io(error) => OutboundFailure::Io(Arc::new(error)), } } } @@ -114,6 +116,9 @@ pub enum InboundFailure { /// The local peer failed to respond to an inbound request #[error("The response channel was dropped without sending a response to the remote")] ResponseOmission, + /// An IO failure happened on an inbound stream. 
+ #[error("An IO failure happened on an inbound stream")] + Io(Arc), } impl From for InboundFailure { @@ -124,6 +129,7 @@ impl From for InboundFailure { request_response::InboundFailure::ConnectionClosed => InboundFailure::ConnectionClosed, request_response::InboundFailure::UnsupportedProtocols => InboundFailure::UnsupportedProtocols, + request_response::InboundFailure::Io(error) => InboundFailure::Io(Arc::new(error)), } } } @@ -319,12 +325,12 @@ pub enum Event { /// requests. There is no uniqueness guarantee in a set of both inbound and outbound /// [`ProtocolRequestId`]s. #[derive(Debug, Clone, PartialEq, Eq, Hash)] -struct ProtocolRequestId { +struct ProtocolRequestId { protocol: ProtocolName, request_id: RequestId, } -impl From<(ProtocolName, RequestId)> for ProtocolRequestId { +impl From<(ProtocolName, RequestId)> for ProtocolRequestId { fn from((protocol, request_id): (ProtocolName, RequestId)) -> Self { Self { protocol, request_id } } @@ -342,7 +348,7 @@ pub struct RequestResponsesBehaviour { >, /// Pending requests, passed down to a request-response [`Behaviour`], awaiting a reply. - pending_requests: HashMap, + pending_requests: HashMap, PendingRequest>, /// Whenever an incoming request arrives, a `Future` is added to this list and will yield the /// start time and the response to send back to the remote. @@ -351,11 +357,11 @@ pub struct RequestResponsesBehaviour { >, /// Whenever an incoming request arrives, the arrival [`Instant`] is recorded here. - pending_responses_arrival_time: HashMap, + pending_responses_arrival_time: HashMap, Instant>, /// Whenever a response is received on `pending_responses`, insert a channel to be notified /// when the request has been sent out. - send_feedback: HashMap>, + send_feedback: HashMap, oneshot::Sender<()>>, /// Primarily used to get a reputation of a node. peer_store: Arc, @@ -364,7 +370,7 @@ pub struct RequestResponsesBehaviour { /// Generated by the response builder and waiting to be processed. 
struct RequestProcessingOutcome { peer: PeerId, - request_id: RequestId, + request_id: InboundRequestId, protocol: ProtocolName, inner_channel: ResponseChannel, ()>>, response: OutgoingResponse, @@ -379,8 +385,7 @@ impl RequestResponsesBehaviour { ) -> Result { let mut protocols = HashMap::new(); for protocol in list { - let mut cfg = Config::default(); - cfg.set_request_timeout(protocol.request_timeout); + let cfg = Config::default().with_request_timeout(protocol.request_timeout); let protocol_support = if protocol.inbound_queue.is_some() { ProtocolSupport::Full @@ -455,7 +460,7 @@ impl RequestResponsesBehaviour { fn send_request_inner( behaviour: &mut Behaviour, - pending_requests: &mut HashMap, + pending_requests: &mut HashMap, PendingRequest>, target: &PeerId, protocol_name: ProtocolName, request: Vec, @@ -541,11 +546,16 @@ impl NetworkBehaviour for RequestResponsesBehaviour { peer: PeerId, addr: &Multiaddr, role_override: Endpoint, + port_use: PortUse, ) -> Result, ConnectionDenied> { let iter = self.protocols.iter_mut().filter_map(|(p, (r, _))| { - if let Ok(handler) = - r.handle_established_outbound_connection(connection_id, peer, addr, role_override) - { + if let Ok(handler) = r.handle_established_outbound_connection( + connection_id, + peer, + addr, + role_override, + port_use, + ) { Some((p.to_string(), handler)) } else { None @@ -558,80 +568,9 @@ impl NetworkBehaviour for RequestResponsesBehaviour { )) } - fn on_swarm_event(&mut self, event: FromSwarm) { - match event { - FromSwarm::ConnectionEstablished(e) => - for (p, _) in self.protocols.values_mut() { - NetworkBehaviour::on_swarm_event(p, FromSwarm::ConnectionEstablished(e)); - }, - FromSwarm::ConnectionClosed(ConnectionClosed { - peer_id, - connection_id, - endpoint, - handler, - remaining_established, - }) => - for (p_name, p_handler) in handler.into_iter() { - if let Some((proto, _)) = self.protocols.get_mut(p_name.as_str()) { - proto.on_swarm_event(FromSwarm::ConnectionClosed(ConnectionClosed { - peer_id, - connection_id, - endpoint, - handler: p_handler, - remaining_established, - })); - } else { - log::error!( - target: "sub-libp2p", - "on_swarm_event/connection_closed: no request-response instance registered for protocol {:?}", - p_name, - ) - } - }, - FromSwarm::DialFailure(e) => - for (p, _) in self.protocols.values_mut() { - NetworkBehaviour::on_swarm_event(p, FromSwarm::DialFailure(e)); - }, - FromSwarm::ListenerClosed(e) => - for (p, _) in self.protocols.values_mut() { - NetworkBehaviour::on_swarm_event(p, FromSwarm::ListenerClosed(e)); - }, - FromSwarm::ListenFailure(e) => - for (p, _) in self.protocols.values_mut() { - NetworkBehaviour::on_swarm_event(p, FromSwarm::ListenFailure(e)); - }, - FromSwarm::ListenerError(e) => - for (p, _) in self.protocols.values_mut() { - NetworkBehaviour::on_swarm_event(p, FromSwarm::ListenerError(e)); - }, - FromSwarm::ExternalAddrExpired(e) => - for (p, _) in self.protocols.values_mut() { - NetworkBehaviour::on_swarm_event(p, FromSwarm::ExternalAddrExpired(e)); - }, - FromSwarm::NewListener(e) => - for (p, _) in self.protocols.values_mut() { - NetworkBehaviour::on_swarm_event(p, FromSwarm::NewListener(e)); - }, - FromSwarm::ExpiredListenAddr(e) => - for (p, _) in self.protocols.values_mut() { - NetworkBehaviour::on_swarm_event(p, FromSwarm::ExpiredListenAddr(e)); - }, - FromSwarm::NewExternalAddrCandidate(e) => - for (p, _) in self.protocols.values_mut() { - NetworkBehaviour::on_swarm_event(p, FromSwarm::NewExternalAddrCandidate(e)); - }, - FromSwarm::ExternalAddrConfirmed(e) => 
- for (p, _) in self.protocols.values_mut() { - NetworkBehaviour::on_swarm_event(p, FromSwarm::ExternalAddrConfirmed(e)); - }, - FromSwarm::AddressChange(e) => - for (p, _) in self.protocols.values_mut() { - NetworkBehaviour::on_swarm_event(p, FromSwarm::AddressChange(e)); - }, - FromSwarm::NewListenAddr(e) => - for (p, _) in self.protocols.values_mut() { - NetworkBehaviour::on_swarm_event(p, FromSwarm::NewListenAddr(e)); - }, + fn on_swarm_event(&mut self, event: FromSwarm) { + for (protocol, _) in self.protocols.values_mut() { + protocol.on_swarm_event(event); } } @@ -653,11 +592,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { } } - fn poll( - &mut self, - cx: &mut Context, - params: &mut impl PollParameters, - ) -> Poll>> { + fn poll(&mut self, cx: &mut Context) -> Poll>> { 'poll_all: loop { // Poll to see if any response is ready to be sent back. while let Poll::Ready(Some(outcome)) = self.pending_responses.poll_next_unpin(cx) { @@ -707,7 +642,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { // Poll request-responses protocols. for (protocol, (ref mut behaviour, ref mut resp_builder)) in &mut self.protocols { - 'poll_protocol: while let Poll::Ready(ev) = behaviour.poll(cx, params) { + 'poll_protocol: while let Poll::Ready(ev) = behaviour.poll(cx) { let ev = match ev { // Main events we are interested in. ToSwarm::GenerateEvent(ev) => ev, @@ -717,29 +652,23 @@ impl NetworkBehaviour for RequestResponsesBehaviour { ToSwarm::Dial { opts } => { if opts.get_peer_id().is_none() { log::error!( + target: "sub-libp2p", "The request-response isn't supposed to start dialing addresses" ); } return Poll::Ready(ToSwarm::Dial { opts }) }, - ToSwarm::NotifyHandler { peer_id, handler, event } => - return Poll::Ready(ToSwarm::NotifyHandler { - peer_id, - handler, - event: ((*protocol).to_string(), event), - }), - ToSwarm::CloseConnection { peer_id, connection } => - return Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }), - ToSwarm::NewExternalAddrCandidate(observed) => - return Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)), - ToSwarm::ExternalAddrConfirmed(addr) => - return Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)), - ToSwarm::ExternalAddrExpired(addr) => - return Poll::Ready(ToSwarm::ExternalAddrExpired(addr)), - ToSwarm::ListenOn { opts } => - return Poll::Ready(ToSwarm::ListenOn { opts }), - ToSwarm::RemoveListener { id } => - return Poll::Ready(ToSwarm::RemoveListener { id }), + event => { + return Poll::Ready( + event.map_in(|event| ((*protocol).to_string(), event)).map_out( + |_| { + unreachable!( + "`GenerateEvent` is handled in a branch above; qed" + ) + }, + ), + ); + }, }; match ev { @@ -859,6 +788,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { error, .. } => { + let error = OutboundFailure::from(error); let started = match self .pending_requests .remove(&(protocol.clone(), request_id).into()) @@ -870,9 +800,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { }) => { // Try using the fallback request if the protocol was not // supported. 
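// Sketch of the fallback handling below, with toy types: a request that
// fails with `UnsupportedProtocols` is retried once on the recorded
// fallback protocol, and only surfaces to the caller as an error otherwise.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Failure {
    UnsupportedProtocols,
    ConnectionClosed,
}

fn on_outbound_failure(
    error: Failure,
    fallback: Option<(Vec<u8>, &'static str)>,
    mut resend: impl FnMut(Vec<u8>, &'static str),
) -> Result<(), Failure> {
    if error == Failure::UnsupportedProtocols {
        if let Some((request, protocol)) = fallback {
            // Transparent second attempt; the waiting caller is only
            // notified if this attempt fails as well.
            resend(request, protocol);
            return Ok(());
        }
    }
    Err(error)
}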
- if let request_response::OutboundFailure::UnsupportedProtocols = - error - { + if matches!(error, OutboundFailure::UnsupportedProtocols) { if let Some((fallback_request, fallback_protocol)) = fallback_request { @@ -893,7 +821,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { } if response_tx - .send(Err(RequestFailure::Network(error.clone().into()))) + .send(Err(RequestFailure::Network(error.clone()))) .is_err() { log::debug!( @@ -920,7 +848,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { peer, protocol: protocol.clone(), duration: started.elapsed(), - result: Err(RequestFailure::Network(error.into())), + result: Err(RequestFailure::Network(error)), }; return Poll::Ready(ToSwarm::GenerateEvent(out)) @@ -1184,7 +1112,10 @@ mod tests { transport, behaviour, keypair.public().to_peer_id(), - SwarmConfig::with_executor(TokioExecutor(runtime)), + SwarmConfig::with_executor(TokioExecutor(runtime)) + // This is taken care of by notification protocols in non-test environment + // It is very slow in test environment for some reason, hence larger timeout + .with_idle_connection_timeout(Duration::from_secs(10)), ); let listen_addr: Multiaddr = format!("/memory/{}", rand::random::()).parse().unwrap(); @@ -1354,7 +1285,9 @@ mod tests { match swarm.select_next_some().await { SwarmEvent::Behaviour(Event::InboundRequest { result, .. }) => { assert!(result.is_ok()); - break + }, + SwarmEvent::ConnectionClosed { .. } => { + break; }, _ => {}, } @@ -1394,20 +1327,20 @@ mod tests { } match response_receiver.unwrap().await.unwrap().unwrap_err() { - RequestFailure::Network(OutboundFailure::ConnectionClosed) => {}, - _ => panic!(), + RequestFailure::Network(OutboundFailure::Io(_)) => {}, + request_failure => panic!("Unexpected failure: {request_failure:?}"), } }); } - /// A [`RequestId`] is a unique identifier among either all inbound or all outbound requests for + /// A `RequestId` is a unique identifier among either all inbound or all outbound requests for /// a single [`RequestResponsesBehaviour`] behaviour. It is not guaranteed to be unique across - /// multiple [`RequestResponsesBehaviour`] behaviours. Thus when handling [`RequestId`] in the + /// multiple [`RequestResponsesBehaviour`] behaviours. Thus, when handling `RequestId` in the /// context of multiple [`RequestResponsesBehaviour`] behaviours, one needs to couple the - /// protocol name with the [`RequestId`] to get a unique request identifier. + /// protocol name with the `RequestId` to get a unique request identifier. /// /// This test ensures that two requests on different protocols can be handled concurrently - /// without a [`RequestId`] collision. + /// without a `RequestId` collision. /// /// See [`ProtocolRequestId`] for additional information. 
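// The split of libp2p's old `RequestId` into `InboundRequestId` and
// `OutboundRequestId` is what forced `ProtocolRequestId` to become generic.
// A sketch with stand-in types (the real code uses `ProtocolName` and the
// libp2p id types re-exported earlier in this file):
use std::collections::HashMap;

#[derive(Debug, Clone, PartialEq, Eq, Hash)]
struct ProtocolRequestId<RequestId> {
    protocol: String,
    request_id: RequestId,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
struct OutboundRequestId(u64); // stand-in for the libp2p type
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
struct InboundRequestId(u64); // stand-in for the libp2p type

// Outbound and inbound bookkeeping now have distinct key types, so mixing
// them up is a compile error instead of a silent id collision:
type PendingRequests = HashMap<ProtocolRequestId<OutboundRequestId>, ()>;
type PendingResponses = HashMap<ProtocolRequestId<InboundRequestId>, ()>;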
#[test] diff --git a/substrate/client/network/src/service.rs b/substrate/client/network/src/service.rs index 5e5e4ee28589..751183ae19a9 100644 --- a/substrate/client/network/src/service.rs +++ b/substrate/client/network/src/service.rs @@ -41,7 +41,7 @@ use crate::{ NetworkState, NotConnectedPeer as NetworkStateNotConnectedPeer, Peer as NetworkStatePeer, }, peer_store::{PeerStore, PeerStoreProvider}, - protocol::{self, NotifsHandlerError, Protocol, Ready}, + protocol::{self, Protocol, Ready}, protocol_controller::{self, ProtoSetConfig, ProtocolController, SetId}, request_responses::{IfDisconnected, ProtocolConfig as RequestResponseConfig, RequestFailure}, service::{ @@ -59,10 +59,7 @@ use crate::{ }; use codec::DecodeAll; -use either::Either; use futures::{channel::oneshot, prelude::*}; -#[allow(deprecated)] -use libp2p::swarm::THandlerErr; use libp2p::{ connection_limits::{ConnectionLimits, Exceeded}, core::{upgrade, ConnectedPoint, Endpoint}, @@ -94,7 +91,6 @@ pub use libp2p::identity::{DecodingError, Keypair, PublicKey}; pub use metrics::NotificationMetrics; pub use protocol::NotificationsSink; use std::{ - cmp, collections::{HashMap, HashSet}, fs, iter, marker::PhantomData, @@ -115,6 +111,7 @@ pub mod signature; pub mod traits; struct Libp2pBandwidthSink { + #[allow(deprecated)] sink: Arc, } @@ -336,7 +333,7 @@ where "🏷 Local node identity is: {}", local_peer_id.to_base58(), ); - log::info!(target: "sub-libp2p", "Running libp2p network backend"); + info!(target: "sub-libp2p", "Running libp2p network backend"); let (transport, bandwidth) = { let config_mem = match network_config.transport { @@ -344,46 +341,7 @@ where TransportConfig::Normal { .. } => false, }; - // The yamux buffer size limit is configured to be equal to the maximum frame size - // of all protocols. 10 bytes are added to each limit for the length prefix that - // is not included in the upper layer protocols limit but is still present in the - // yamux buffer. These 10 bytes correspond to the maximum size required to encode - // a variable-length-encoding 64bits number. In other words, we make the - // assumption that no notification larger than 2^64 will ever be sent. - let yamux_maximum_buffer_size = { - let requests_max = request_response_protocols - .iter() - .map(|cfg| usize::try_from(cfg.max_request_size).unwrap_or(usize::MAX)); - let responses_max = request_response_protocols - .iter() - .map(|cfg| usize::try_from(cfg.max_response_size).unwrap_or(usize::MAX)); - let notifs_max = notification_protocols - .iter() - .map(|cfg| usize::try_from(cfg.max_notification_size()).unwrap_or(usize::MAX)); - - // A "default" max is added to cover all the other protocols: ping, identify, - // kademlia, block announces, and transactions. 
- let default_max = cmp::max( - 1024 * 1024, - usize::try_from(protocol::BLOCK_ANNOUNCES_TRANSACTIONS_SUBSTREAM_SIZE) - .unwrap_or(usize::MAX), - ); - - iter::once(default_max) - .chain(requests_max) - .chain(responses_max) - .chain(notifs_max) - .max() - .expect("iterator known to always yield at least one element; qed") - .saturating_add(10) - }; - - transport::build_transport( - local_identity.clone().into(), - config_mem, - network_config.yamux_window_size, - yamux_maximum_buffer_size, - ) + transport::build_transport(local_identity.clone().into(), config_mem) }; let (to_notifications, from_protocol_controllers) = @@ -973,6 +931,18 @@ where expires, )); } + + fn start_providing(&self, key: KademliaKey) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::StartProviding(key)); + } + + fn stop_providing(&self, key: KademliaKey) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::StopProviding(key)); + } + + fn get_providers(&self, key: KademliaKey) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::GetProviders(key)); + } } #[async_trait::async_trait] @@ -1333,6 +1303,9 @@ enum ServiceToWorkerMsg { update_local_storage: bool, }, StoreRecord(KademliaKey, Vec, Option, Option), + StartProviding(KademliaKey), + StopProviding(KademliaKey), + GetProviders(KademliaKey), AddKnownAddress(PeerId, Multiaddr), EventStream(out_events::Sender), Request { @@ -1466,6 +1439,12 @@ where .network_service .behaviour_mut() .store_record(key.into(), value, publisher, expires), + ServiceToWorkerMsg::StartProviding(key) => + self.network_service.behaviour_mut().start_providing(key.into()), + ServiceToWorkerMsg::StopProviding(key) => + self.network_service.behaviour_mut().stop_providing(&key.into()), + ServiceToWorkerMsg::GetProviders(key) => + self.network_service.behaviour_mut().get_providers(key.into()), ServiceToWorkerMsg::AddKnownAddress(peer_id, addr) => self.network_service.behaviour_mut().add_known_address(peer_id, addr), ServiceToWorkerMsg::EventStream(sender) => self.event_streams.push(sender), @@ -1501,8 +1480,7 @@ where } /// Process the next event coming from `Swarm`. - #[allow(deprecated)] - fn handle_swarm_event(&mut self, event: SwarmEvent>>) { + fn handle_swarm_event(&mut self, event: SwarmEvent) { match event { SwarmEvent::Behaviour(BehaviourOut::InboundRequest { protocol, result, .. 
}) => { if let Some(metrics) = self.metrics.as_ref() { @@ -1527,6 +1505,7 @@ where Some("busy-omitted"), ResponseFailure::Network(InboundFailure::ConnectionClosed) => Some("connection-closed"), + ResponseFailure::Network(InboundFailure::Io(_)) => Some("io"), }; if let Some(reason) = reason { @@ -1566,6 +1545,7 @@ where "connection-closed", RequestFailure::Network(OutboundFailure::UnsupportedProtocols) => "unsupported", + RequestFailure::Network(OutboundFailure::Io(_)) => "io", }; metrics @@ -1678,6 +1658,9 @@ where DhtEvent::ValuePut(_) => "value-put", DhtEvent::ValuePutFailed(_) => "value-put-failed", DhtEvent::PutRecordRequest(_, _, _, _) => "put-record-request", + DhtEvent::StartProvidingFailed(_) => "start-providing-failed", + DhtEvent::ProvidersFound(_, _) => "providers-found", + DhtEvent::ProvidersNotFound(_) => "providers-not-found", }; metrics .kademlia_query_duration @@ -1732,15 +1715,6 @@ where }; let reason = match cause { Some(ConnectionError::IO(_)) => "transport-error", - Some(ConnectionError::Handler(Either::Left(Either::Left( - Either::Left(Either::Right( - NotifsHandlerError::SyncNotificationsClogged, - )), - )))) => "sync-notifications-clogged", - Some(ConnectionError::Handler(Either::Left(Either::Left( - Either::Right(Either::Left(_)), - )))) => "ping-timeout", - Some(ConnectionError::Handler(_)) => "protocol-error", Some(ConnectionError::KeepAliveTimeout) => "keep-alive-timeout", None => "actively-closed", }; @@ -1779,7 +1753,12 @@ where not_reported.then(|| self.boot_node_ids.get(&peer_id)).flatten() { if let DialError::WrongPeerId { obtained, endpoint } = &error { - if let ConnectedPoint::Dialer { address, role_override: _ } = endpoint { + if let ConnectedPoint::Dialer { + address, + role_override: _, + port_use: _, + } = endpoint + { let address_without_peer_id = parse_addr(address.clone().into()) .map_or_else(|_| address.clone(), |r| r.1.into()); @@ -1800,7 +1779,6 @@ where } if let Some(metrics) = self.metrics.as_ref() { - #[allow(deprecated)] let reason = match error { DialError::Denied { cause } => if cause.downcast::().is_ok() { @@ -1840,7 +1818,6 @@ where "Libp2p => IncomingConnectionError({local_addr},{send_back_addr} via {connection_id:?}): {error}" ); if let Some(metrics) = self.metrics.as_ref() { - #[allow(deprecated)] let reason = match error { ListenError::Denied { cause } => if cause.downcast::().is_ok() { @@ -1893,6 +1870,21 @@ where metrics.listeners_errors_total.inc(); } }, + SwarmEvent::NewExternalAddrCandidate { address } => { + trace!(target: "sub-libp2p", "Libp2p => NewExternalAddrCandidate: {address:?}"); + }, + SwarmEvent::ExternalAddrConfirmed { address } => { + trace!(target: "sub-libp2p", "Libp2p => ExternalAddrConfirmed: {address:?}"); + }, + SwarmEvent::ExternalAddrExpired { address } => { + trace!(target: "sub-libp2p", "Libp2p => ExternalAddrExpired: {address:?}"); + }, + SwarmEvent::NewExternalAddrOfPeer { peer_id, address } => { + trace!(target: "sub-libp2p", "Libp2p => NewExternalAddrOfPeer({peer_id:?}): {address:?}") + }, + event => { + warn!(target: "sub-libp2p", "New unknown SwarmEvent libp2p event: {event:?}"); + }, } } } diff --git a/substrate/client/network/src/service/traits.rs b/substrate/client/network/src/service/traits.rs index f5dd2995acb1..acfed9ea894c 100644 --- a/substrate/client/network/src/service/traits.rs +++ b/substrate/client/network/src/service/traits.rs @@ -234,6 +234,15 @@ pub trait NetworkDHTProvider { publisher: Option, expires: Option, ); + + /// Register this node as a provider for `key` on the DHT. 
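// Sketch of how a caller might drive the new provider API; `Dht` is a local
// stand-in mirroring just the three methods added to `NetworkDHTProvider`,
// and `KademliaKey` stands in as `Vec<u8>` here. Results arrive later on the
// event stream as `DhtEvent::ProvidersFound` / `ProvidersNotFound` /
// `StartProvidingFailed`, as listed in the metrics mapping above.
type KademliaKey = Vec<u8>;

trait Dht {
    fn start_providing(&self, key: KademliaKey);
    fn stop_providing(&self, key: KademliaKey);
    fn get_providers(&self, key: KademliaKey);
}

fn announce_and_lookup(net: &impl Dht, key: KademliaKey) {
    net.start_providing(key.clone()); // advertise this node as a provider
    net.get_providers(key.clone());   // ask the DHT who else provides `key`
    net.stop_providing(key);          // withdraw the advertisement later
}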
+ fn start_providing(&self, key: KademliaKey); + + /// Deregister this node as a provider for `key` on the DHT. + fn stop_providing(&self, key: KademliaKey); + + /// Start getting the list of providers for `key` on the DHT. + fn get_providers(&self, key: KademliaKey); } impl NetworkDHTProvider for Arc @@ -262,6 +271,18 @@ where ) { T::store_record(self, key, value, publisher, expires) } + + fn start_providing(&self, key: KademliaKey) { + T::start_providing(self, key) + } + + fn stop_providing(&self, key: KademliaKey) { + T::stop_providing(self, key) + } + + fn get_providers(&self, key: KademliaKey) { + T::get_providers(self, key) + } } /// Provides an ability to set a fork sync request for a particular block. diff --git a/substrate/client/network/src/transport.rs b/substrate/client/network/src/transport.rs index ed7e7c574e16..2f6b7a643c48 100644 --- a/substrate/client/network/src/transport.rs +++ b/substrate/client/network/src/transport.rs @@ -29,6 +29,8 @@ use libp2p::{ }; use std::{sync::Arc, time::Duration}; +// TODO: Create a wrapper similar to upstream `BandwidthTransport` that tracks sent/received bytes +#[allow(deprecated)] pub use libp2p::bandwidth::BandwidthSinks; /// Builds the transport that serves as a common ground for all connections. @@ -36,21 +38,12 @@ pub use libp2p::bandwidth::BandwidthSinks; /// If `memory_only` is true, then only communication within the same process are allowed. Only /// addresses with the format `/memory/...` are allowed. /// -/// `yamux_window_size` is the maximum size of the Yamux receive windows. `None` to leave the -/// default (256kiB). -/// -/// `yamux_maximum_buffer_size` is the maximum allowed size of the Yamux buffer. This should be -/// set either to the maximum of all the maximum allowed sizes of messages frames of all -/// high-level protocols combined, or to some generously high value if you are sure that a maximum -/// size is enforced on all high-level protocols. -/// /// Returns a `BandwidthSinks` object that allows querying the average bandwidth produced by all /// the connections spawned with this transport. +#[allow(deprecated)] pub fn build_transport( keypair: identity::Keypair, memory_only: bool, - yamux_window_size: Option, - yamux_maximum_buffer_size: usize, ) -> (Boxed<(PeerId, StreamMuxerBox)>, Arc) { // Build the base layer of the transport. let transport = if !memory_only { @@ -81,19 +74,7 @@ pub fn build_transport( }; let authentication_config = noise::Config::new(&keypair).expect("Can create noise config. qed"); - let multiplexing_config = { - let mut yamux_config = libp2p::yamux::Config::default(); - // Enable proper flow-control: window updates are only sent when - // buffered data has been consumed. - yamux_config.set_window_update_mode(libp2p::yamux::WindowUpdateMode::on_read()); - yamux_config.set_max_buffer_size(yamux_maximum_buffer_size); - - if let Some(yamux_window_size) = yamux_window_size { - yamux_config.set_receive_window_size(yamux_window_size); - } - - yamux_config - }; + let multiplexing_config = libp2p::yamux::Config::default(); let transport = transport .upgrade(upgrade::Version::V1Lazy) diff --git a/substrate/client/network/sync/src/engine.rs b/substrate/client/network/sync/src/engine.rs index cc2089d1974c..0c39ea0b93c0 100644 --- a/substrate/client/network/sync/src/engine.rs +++ b/substrate/client/network/sync/src/engine.rs @@ -100,6 +100,8 @@ mod rep { pub const REFUSED: Rep = Rep::new(-(1 << 10), "Request refused"); /// Reputation change when a peer doesn't respond in time to our messages. 
pub const TIMEOUT: Rep = Rep::new(-(1 << 10), "Request timeout"); + /// Reputation change when a peer connection failed with IO error. + pub const IO: Rep = Rep::new(-(1 << 10), "IO error during request"); } struct Metrics { @@ -545,7 +547,14 @@ where self.process_service_command(command), notification_event = self.notification_service.next_event() => match notification_event { Some(event) => self.process_notification_event(event), - None => return, + None => { + error!( + target: LOG_TARGET, + "Terminating `SyncingEngine` because `NotificationService` has terminated.", + ); + + return; + } }, response_event = self.pending_responses.select_next_some() => self.process_response_event(response_event), @@ -1012,9 +1021,14 @@ where debug_assert!( false, "Can not receive `RequestFailure::Obsolete` after dropping the \ - response receiver.", + response receiver.", ); }, + RequestFailure::Network(OutboundFailure::Io(_)) => { + self.network_service.report_peer(peer_id, rep::IO); + self.network_service + .disconnect_peer(peer_id, self.block_announce_protocol_name.clone()); + }, } }, Err(oneshot::Canceled) => { diff --git a/substrate/client/network/sync/src/strategy/state_sync.rs b/substrate/client/network/sync/src/strategy/state_sync.rs index 7a0cc1191609..47d859a1b7c6 100644 --- a/substrate/client/network/sync/src/strategy/state_sync.rs +++ b/substrate/client/network/sync/src/strategy/state_sync.rs @@ -89,22 +89,62 @@ pub enum ImportResult { BadResponse, } -/// State sync state machine. Accumulates partial state data until it -/// is ready to be imported. -pub struct StateSync { - target_block: B::Hash, +struct StateSyncMetadata { + last_key: SmallVec<[Vec; 2]>, target_header: B::Header, - target_root: B::Hash, target_body: Option>, target_justifications: Option, - last_key: SmallVec<[Vec; 2]>, - state: HashMap, (Vec<(Vec, Vec)>, Vec>)>, complete: bool, - client: Arc, imported_bytes: u64, skip_proof: bool, } +impl StateSyncMetadata { + fn target_hash(&self) -> B::Hash { + self.target_header.hash() + } + + /// Returns target block number. + fn target_number(&self) -> NumberFor { + *self.target_header.number() + } + + fn target_root(&self) -> B::Hash { + *self.target_header.state_root() + } + + fn next_request(&self) -> StateRequest { + StateRequest { + block: self.target_hash().encode(), + start: self.last_key.clone().into_vec(), + no_proof: self.skip_proof, + } + } + + fn progress(&self) -> StateSyncProgress { + let cursor = *self.last_key.get(0).and_then(|last| last.get(0)).unwrap_or(&0u8); + let percent_done = cursor as u32 * 100 / 256; + StateSyncProgress { + percentage: percent_done, + size: self.imported_bytes, + phase: if self.complete { + StateSyncPhase::ImportingState + } else { + StateSyncPhase::DownloadingState + }, + } + } +} + +/// State sync state machine. +/// +/// Accumulates partial state data until it is ready to be imported. 
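// Worked example for `StateSyncMetadata::progress()` above: keys are visited
// in lexicographic order, so the first byte of the last visited top-trie key
// approximates the fraction of the key space already covered.
fn progress_percent(last_key: &[u8]) -> u32 {
    let cursor = *last_key.first().unwrap_or(&0u8);
    cursor as u32 * 100 / 256
}
// progress_percent(&[0x00]) == 0, progress_percent(&[0x80]) == 50,
// progress_percent(&[0xff]) == 99: the estimate never reaches 100%, and
// completion is tracked by the separate `complete` flag instead.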
+pub struct StateSync { + metadata: StateSyncMetadata, + state: HashMap, (Vec<(Vec, Vec)>, Vec>)>, + client: Arc, +} + impl StateSync where B: BlockT, @@ -120,16 +160,16 @@ where ) -> Self { Self { client, - target_block: target_header.hash(), - target_root: *target_header.state_root(), - target_header, - target_body, - target_justifications, - last_key: SmallVec::default(), + metadata: StateSyncMetadata { + last_key: SmallVec::default(), + target_header, + target_body, + target_justifications, + complete: false, + imported_bytes: 0, + skip_proof, + }, state: HashMap::default(), - complete: false, - imported_bytes: 0, - skip_proof, } } @@ -155,7 +195,7 @@ where if is_top && well_known_keys::is_child_storage_key(key.as_slice()) { child_storage_roots.push((value, key)); } else { - self.imported_bytes += key.len() as u64; + self.metadata.imported_bytes += key.len() as u64; entry.0.push((key, value)); } } @@ -177,11 +217,11 @@ where // the parent cursor stays valid. // Empty parent trie content only happens when all the response content // is part of a single child trie. - if self.last_key.len() == 2 && response.entries[0].entries.is_empty() { + if self.metadata.last_key.len() == 2 && response.entries[0].entries.is_empty() { // Do not remove the parent trie position. - self.last_key.pop(); + self.metadata.last_key.pop(); } else { - self.last_key.clear(); + self.metadata.last_key.clear(); } for state in response.entries { debug!( @@ -193,7 +233,7 @@ where if !state.complete { if let Some(e) = state.entries.last() { - self.last_key.push(e.key.clone()); + self.metadata.last_key.push(e.key.clone()); } complete = false; } @@ -219,11 +259,11 @@ where debug!(target: LOG_TARGET, "Bad state response"); return ImportResult::BadResponse } - if !self.skip_proof && response.proof.is_empty() { + if !self.metadata.skip_proof && response.proof.is_empty() { debug!(target: LOG_TARGET, "Missing proof"); return ImportResult::BadResponse } - let complete = if !self.skip_proof { + let complete = if !self.metadata.skip_proof { debug!(target: LOG_TARGET, "Importing state from {} trie nodes", response.proof.len()); let proof_size = response.proof.len() as u64; let proof = match CompactProof::decode(&mut response.proof.as_ref()) { @@ -234,9 +274,9 @@ where }, }; let (values, completed) = match self.client.verify_range_proof( - self.target_root, + self.metadata.target_root(), proof, - self.last_key.as_slice(), + self.metadata.last_key.as_slice(), ) { Err(e) => { debug!( @@ -251,27 +291,25 @@ where debug!(target: LOG_TARGET, "Imported with {} keys", values.len()); let complete = completed == 0; - if !complete && !values.update_last_key(completed, &mut self.last_key) { + if !complete && !values.update_last_key(completed, &mut self.metadata.last_key) { debug!(target: LOG_TARGET, "Error updating key cursor, depth: {}", completed); }; self.process_state_verified(values); - self.imported_bytes += proof_size; + self.metadata.imported_bytes += proof_size; complete } else { self.process_state_unverified(response) }; if complete { - self.complete = true; + self.metadata.complete = true; + let target_hash = self.metadata.target_hash(); ImportResult::Import( - self.target_block, - self.target_header.clone(), - ImportedState { - block: self.target_block, - state: std::mem::take(&mut self.state).into(), - }, - self.target_body.clone(), - self.target_justifications.clone(), + target_hash, + self.metadata.target_header.clone(), + ImportedState { block: target_hash, state: std::mem::take(&mut self.state).into() }, + 
self.metadata.target_body.clone(), + self.metadata.target_justifications.clone(), ) } else { ImportResult::Continue @@ -280,40 +318,26 @@ where /// Produce next state request. fn next_request(&self) -> StateRequest { - StateRequest { - block: self.target_block.encode(), - start: self.last_key.clone().into_vec(), - no_proof: self.skip_proof, - } + self.metadata.next_request() } /// Check if the state is complete. fn is_complete(&self) -> bool { - self.complete + self.metadata.complete } /// Returns target block number. fn target_number(&self) -> NumberFor { - *self.target_header.number() + self.metadata.target_number() } /// Returns target block hash. fn target_hash(&self) -> B::Hash { - self.target_block + self.metadata.target_hash() } /// Returns state sync estimated progress. fn progress(&self) -> StateSyncProgress { - let cursor = *self.last_key.get(0).and_then(|last| last.get(0)).unwrap_or(&0u8); - let percent_done = cursor as u32 * 100 / 256; - StateSyncProgress { - percentage: percent_done, - size: self.imported_bytes, - phase: if self.complete { - StateSyncPhase::ImportingState - } else { - StateSyncPhase::DownloadingState - }, - } + self.metadata.progress() } } diff --git a/substrate/client/network/test/Cargo.toml b/substrate/client/network/test/Cargo.toml index ebece1762f29..6340d1dfb2f4 100644 --- a/substrate/client/network/test/Cargo.toml +++ b/substrate/client/network/test/Cargo.toml @@ -33,7 +33,7 @@ sc-network-types = { workspace = true, default-features = true } sc-utils = { workspace = true, default-features = true } sc-network-light = { workspace = true, default-features = true } sc-network-sync = { workspace = true, default-features = true } -sc-service = { features = ["test-helpers"], workspace = true } +sc-service = { workspace = true } sp-blockchain = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } diff --git a/substrate/client/network/test/src/lib.rs b/substrate/client/network/test/src/lib.rs index 825481314c67..3cdf211e07f6 100644 --- a/substrate/client/network/test/src/lib.rs +++ b/substrate/client/network/test/src/lib.rs @@ -91,7 +91,7 @@ use sp_runtime::{ traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero}, Justification, Justifications, }; -use substrate_test_runtime_client::AccountKeyring; +use substrate_test_runtime_client::Sr25519Keyring; pub use substrate_test_runtime_client::{ runtime::{Block, ExtrinsicBuilder, Hash, Header, Transfer}, TestClient, TestClientBuilder, TestClientBuilderExt, @@ -475,8 +475,8 @@ where BlockOrigin::File, |mut builder| { let transfer = Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Alice.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Alice.into(), amount: 1, nonce, }; diff --git a/substrate/client/network/types/Cargo.toml b/substrate/client/network/types/Cargo.toml index 7438eaeffcd2..67814f135d39 100644 --- a/substrate/client/network/types/Cargo.toml +++ b/substrate/client/network/types/Cargo.toml @@ -14,7 +14,7 @@ bs58 = { workspace = true, default-features = true } bytes = { version = "1.4.0", default-features = false } ed25519-dalek = { workspace = true, default-features = true } libp2p-identity = { features = ["ed25519", "peerid", "rand"], workspace = true } -libp2p-kad = { version = "0.44.6", default-features = false } +libp2p-kad = { version = "0.46.2", default-features = false } litep2p = { workspace = true } log = { workspace = true, default-features = 
true } multiaddr = { workspace = true } diff --git a/substrate/client/proposer-metrics/src/lib.rs b/substrate/client/proposer-metrics/src/lib.rs index 2856300cf802..a62278988f12 100644 --- a/substrate/client/proposer-metrics/src/lib.rs +++ b/substrate/client/proposer-metrics/src/lib.rs @@ -44,7 +44,7 @@ impl MetricsLink { } /// The reason why proposing a block ended. -#[derive(Clone, Copy, PartialEq, Eq)] +#[derive(Clone, Copy, PartialEq, Eq, Debug)] pub enum EndProposingReason { NoMoreTransactions, HitDeadline, diff --git a/substrate/client/rpc-servers/src/lib.rs b/substrate/client/rpc-servers/src/lib.rs index 31e4042d81f2..4234ff3196ef 100644 --- a/substrate/client/rpc-servers/src/lib.rs +++ b/substrate/client/rpc-servers/src/lib.rs @@ -144,11 +144,56 @@ where local_addrs.push(local_addr); let cfg = cfg.clone(); - let mut id_provider2 = id_provider.clone(); + let RpcSettings { + batch_config, + max_connections, + max_payload_in_mb, + max_payload_out_mb, + max_buffer_capacity_per_connection, + max_subscriptions_per_connection, + rpc_methods, + rate_limit_trust_proxy_headers, + rate_limit_whitelisted_ips, + host_filter, + cors, + rate_limit, + } = listener.rpc_settings(); + + let http_middleware = tower::ServiceBuilder::new() + .option_layer(host_filter) + // Proxy `GET /health, /health/readiness` requests to the internal + // `system_health` method. + .layer(NodeHealthProxyLayer::default()) + .layer(cors); + + let mut builder = jsonrpsee::server::Server::builder() + .max_request_body_size(max_payload_in_mb.saturating_mul(MEGABYTE)) + .max_response_body_size(max_payload_out_mb.saturating_mul(MEGABYTE)) + .max_connections(max_connections) + .max_subscriptions_per_connection(max_subscriptions_per_connection) + .enable_ws_ping( + PingConfig::new() + .ping_interval(Duration::from_secs(30)) + .inactive_limit(Duration::from_secs(60)) + .max_failures(3), + ) + .set_http_middleware(http_middleware) + .set_message_buffer_capacity(max_buffer_capacity_per_connection) + .set_batch_request_config(batch_config) + .custom_tokio_runtime(cfg.tokio_handle.clone()); + + if let Some(provider) = id_provider.clone() { + builder = builder.set_id_provider(provider); + } else { + builder = builder.set_id_provider(RandomStringIdProvider::new(16)); + }; + + let service_builder = builder.to_service_builder(); + let deny_unsafe = deny_unsafe(&local_addr, &rpc_methods); tokio_handle.spawn(async move { loop { - let (sock, remote_addr, rpc_cfg) = tokio::select! { + let (sock, remote_addr) = tokio::select! { res = listener.accept() => { match res { Ok(s) => s, @@ -161,57 +206,10 @@ where _ = cfg.stop_handle.clone().shutdown() => break, }; - let RpcSettings { - batch_config, - max_connections, - max_payload_in_mb, - max_payload_out_mb, - max_buffer_capacity_per_connection, - max_subscriptions_per_connection, - rpc_methods, - rate_limit_trust_proxy_headers, - rate_limit_whitelisted_ips, - host_filter, - cors, - rate_limit, - } = rpc_cfg; - - let http_middleware = tower::ServiceBuilder::new() - .option_layer(host_filter) - // Proxy `GET /health, /health/readiness` requests to the internal - // `system_health` method. 
- .layer(NodeHealthProxyLayer::default()) - .layer(cors); - - let mut builder = jsonrpsee::server::Server::builder() - .max_request_body_size(max_payload_in_mb.saturating_mul(MEGABYTE)) - .max_response_body_size(max_payload_out_mb.saturating_mul(MEGABYTE)) - .max_connections(max_connections) - .max_subscriptions_per_connection(max_subscriptions_per_connection) - .enable_ws_ping( - PingConfig::new() - .ping_interval(Duration::from_secs(30)) - .inactive_limit(Duration::from_secs(60)) - .max_failures(3), - ) - .set_http_middleware(http_middleware) - .set_message_buffer_capacity(max_buffer_capacity_per_connection) - .set_batch_request_config(batch_config) - .custom_tokio_runtime(cfg.tokio_handle.clone()) - .set_id_provider(RandomStringIdProvider::new(16)); - - if let Some(provider) = id_provider2.take() { - builder = builder.set_id_provider(provider); - } else { - builder = builder.set_id_provider(RandomStringIdProvider::new(16)); - }; - - let service_builder = builder.to_service_builder(); - let deny_unsafe = deny_unsafe(&local_addr, &rpc_methods); - let ip = remote_addr.ip(); let cfg2 = cfg.clone(); let service_builder2 = service_builder.clone(); + let rate_limit_whitelisted_ips2 = rate_limit_whitelisted_ips.clone(); let svc = tower::service_fn(move |mut req: http::Request| { @@ -224,14 +222,14 @@ where let proxy_ip = if rate_limit_trust_proxy_headers { get_proxy_ip(&req) } else { None }; - let rate_limit_cfg = if rate_limit_whitelisted_ips + let rate_limit_cfg = if rate_limit_whitelisted_ips2 .iter() .any(|ips| ips.contains(proxy_ip.unwrap_or(ip))) { log::debug!(target: "rpc", "ip={ip}, proxy_ip={:?} is trusted, disabling rate-limit", proxy_ip); None } else { - if !rate_limit_whitelisted_ips.is_empty() { + if !rate_limit_whitelisted_ips2.is_empty() { log::debug!(target: "rpc", "ip={ip}, proxy_ip={:?} is not trusted, rate-limit enabled", proxy_ip); } rate_limit diff --git a/substrate/client/rpc-servers/src/utils.rs b/substrate/client/rpc-servers/src/utils.rs index 51cce6224298..b76cfced3401 100644 --- a/substrate/client/rpc-servers/src/utils.rs +++ b/substrate/client/rpc-servers/src/utils.rs @@ -176,17 +176,19 @@ pub(crate) struct Listener { impl Listener { /// Accepts a new connection. - pub(crate) async fn accept( - &mut self, - ) -> std::io::Result<(tokio::net::TcpStream, SocketAddr, RpcSettings)> { + pub(crate) async fn accept(&mut self) -> std::io::Result<(tokio::net::TcpStream, SocketAddr)> { let (sock, remote_addr) = self.listener.accept().await?; - Ok((sock, remote_addr, self.cfg.clone())) + Ok((sock, remote_addr)) } /// Returns the local address the listener is bound to. 
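// Sketch of the restructuring in the server loop above, with a made-up
// `Settings` type standing in for the real `RpcSettings`/service builder:
// all per-server state is now prepared once, before `accept()`, and each
// accepted connection only clones cheap handles.
struct Settings {
    max_connections: u32,
}

async fn serve(listener: tokio::net::TcpListener, settings: Settings) {
    let shared = std::sync::Arc::new(settings); // built once, not per socket
    loop {
        let Ok((_sock, _remote_addr)) = listener.accept().await else { break };
        let per_conn = shared.clone(); // per-connection cost is one `Arc` clone
        tokio::spawn(async move {
            // Hand `per_conn` to the connection service here.
            let _ = per_conn.max_connections;
        });
    }
}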
pub fn local_addr(&self) -> SocketAddr { self.local_addr } + + /// Returns a clone of the RPC settings this listener was configured with. + pub fn rpc_settings(&self) -> RpcSettings { + self.cfg.clone() + } } pub(crate) fn host_filtering(enabled: bool, addr: SocketAddr) -> Option { diff --git a/substrate/client/rpc-spec-v2/Cargo.toml b/substrate/client/rpc-spec-v2/Cargo.toml index daa805912fb9..70f68436767f 100644 --- a/substrate/client/rpc-spec-v2/Cargo.toml +++ b/substrate/client/rpc-spec-v2/Cargo.toml @@ -42,6 +42,7 @@ log = { workspace = true, default-features = true } futures-util = { workspace = true } rand = { workspace = true, default-features = true } schnellru = { workspace = true } +itertools = { workspace = true } [dev-dependencies] async-trait = { workspace = true } @@ -55,7 +56,7 @@ sp-consensus = { workspace = true, default-features = true } sp-externalities = { workspace = true, default-features = true } sp-maybe-compressed-blob = { workspace = true, default-features = true } sc-block-builder = { workspace = true, default-features = true } -sc-service = { features = ["test-helpers"], workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } sc-rpc = { workspace = true, default-features = true, features = ["test-helpers"] } assert_matches = { workspace = true } pretty_assertions = { workspace = true } diff --git a/substrate/client/rpc-spec-v2/src/archive/api.rs b/substrate/client/rpc-spec-v2/src/archive/api.rs index b19738304000..a205d0502c93 100644 --- a/substrate/client/rpc-spec-v2/src/archive/api.rs +++ b/substrate/client/rpc-spec-v2/src/archive/api.rs @@ -19,7 +19,9 @@ //! API trait of the archive methods. use crate::{ - common::events::{ArchiveStorageResult, PaginatedStorageQuery}, + common::events::{ + ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageEvent, StorageQuery, + }, MethodResult, }; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; @@ -97,11 +99,32 @@ pub trait ArchiveApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "archive_unstable_storage", blocking)] + #[subscription( + name = "archive_unstable_storage" => "archive_unstable_storageEvent", + unsubscribe = "archive_unstable_stopStorage", + item = ArchiveStorageEvent, + )] fn archive_unstable_storage( &self, hash: Hash, - items: Vec>, + items: Vec>, child_trie: Option, - ) -> RpcResult; + ); + + /// Returns the storage difference between two blocks. + /// + /// # Unstable + /// + /// This method is unstable and can change in minor or patch releases. + #[subscription( + name = "archive_unstable_storageDiff" => "archive_unstable_storageDiffEvent", + unsubscribe = "archive_unstable_storageDiff_stopStorageDiff", + item = ArchiveStorageDiffEvent, + )] + fn archive_unstable_storage_diff( + &self, + hash: Hash, + items: Vec>, + previous_hash: Option, + ); }
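The trait change above turns `archive_unstable_storage` from a blocking method into a subscription, so clients now drain an event stream instead of receiving a single paginated response. A minimal client-side sketch of the new flow, not part of this patch: the endpoint, block hash, and query payload are placeholders, the JSON item shape is an assumption based on `StorageQuery`, and the method and unsubscribe names are copied from the `#[subscription]` attribute above.

```rust
// Hedged sketch: consume the `archive_unstable_storage` subscription with a
// jsonrpsee WebSocket client. Endpoint, hash, and items are placeholders.
use jsonrpsee::{core::client::SubscriptionClientT, rpc_params, ws_client::WsClientBuilder};
use serde_json::{json, Value};

async fn dump_storage_events() -> Result<(), Box<dyn std::error::Error>> {
    let client = WsClientBuilder::default().build("ws://127.0.0.1:9944").await?;

    let mut sub = client
        .subscribe::<Value, _>(
            "archive_unstable_storage",
            // Parameters are (hash, items, child_trie); the item shape below
            // mirrors `StorageQuery` and is an assumption, not normative.
            rpc_params![
                "0x0000000000000000000000000000000000000000000000000000000000000000",
                json!([{ "key": "0x", "type": "descendantsValues" }]),
                Value::Null
            ],
            "archive_unstable_stopStorage",
        )
        .await?;

    // Events arrive until the server emits its terminal done/error event.
    while let Some(event) = sub.next().await {
        println!("{}", event?);
    }
    Ok(())
}
```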
diff --git a/substrate/client/rpc-spec-v2/src/archive/archive.rs b/substrate/client/rpc-spec-v2/src/archive/archive.rs index dd6c566a76ed..62e44a016241 100644 --- a/substrate/client/rpc-spec-v2/src/archive/archive.rs +++ b/substrate/client/rpc-spec-v2/src/archive/archive.rs @@ -19,17 +19,29 @@ //! API implementation for `archive`. use crate::{ - archive::{error::Error as ArchiveError, ArchiveApiServer}, - common::events::{ArchiveStorageResult, PaginatedStorageQuery}, - hex_string, MethodResult, + archive::{ + archive_storage::ArchiveStorageDiff, error::Error as ArchiveError, ArchiveApiServer, + }, + common::{ + events::{ + ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageEvent, StorageQuery, + }, + storage::{QueryResult, StorageSubscriptionClient}, + }, + hex_string, MethodResult, SubscriptionTaskExecutor, }; use codec::Encode; -use jsonrpsee::core::{async_trait, RpcResult}; +use futures::FutureExt; +use jsonrpsee::{ + core::{async_trait, RpcResult}, + PendingSubscriptionSink, +}; use sc_client_api::{ Backend, BlockBackend, BlockchainEvents, CallExecutor, ChildInfo, ExecutorProvider, StorageKey, StorageProvider, }; +use sc_rpc::utils::Subscription; use sp_api::{CallApiAt, CallContext}; use sp_blockchain::{ Backend as BlockChainBackend, Error as BlockChainError, HeaderBackend, HeaderMetadata, @@ -41,37 +53,15 @@ use sp_runtime::{ }; use std::{collections::HashSet, marker::PhantomData, sync::Arc}; -use super::archive_storage::ArchiveStorage; - -/// The configuration of [`Archive`]. -pub struct ArchiveConfig { - /// The maximum number of items the `archive_storage` can return for a descendant query before - /// pagination is required. - pub max_descendant_responses: usize, - /// The maximum number of queried items allowed for the `archive_storage` at a time. - pub max_queried_items: usize, -} +use tokio::sync::mpsc; -/// The maximum number of items the `archive_storage` can return for a descendant query before -/// pagination is required. -/// -/// Note: this is identical to the `chainHead` value. -const MAX_DESCENDANT_RESPONSES: usize = 5; +pub(crate) const LOG_TARGET: &str = "rpc-spec-v2::archive"; -/// The maximum number of queried items allowed for the `archive_storage` at a time. +/// The buffer capacity for each storage query. /// -/// Note: A queried item can also be a descendant query which can return up to -/// `MAX_DESCENDANT_RESPONSES`. -const MAX_QUERIED_ITEMS: usize = 8; - -impl Default for ArchiveConfig { - fn default() -> Self { - Self { - max_descendant_responses: MAX_DESCENDANT_RESPONSES, - max_queried_items: MAX_QUERIED_ITEMS, - } - } -} +/// This is small because the underlying JSON-RPC server has +/// its own buffer capacity per connection as well. +const STORAGE_QUERY_BUF: usize = 16;
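`STORAGE_QUERY_BUF` is what turns the storage walk into a backpressured pipeline: the blocking producer pushes into a channel of this size and simply parks when the RPC consumer lags. A self-contained sketch of that shape, assuming only the `tokio` and `futures` crates (none of the names below come from this crate):

```rust
// Hedged sketch of the bounded producer/consumer pattern used below.
use tokio::sync::mpsc;

const BUF: usize = 16; // mirrors STORAGE_QUERY_BUF

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel::<u64>(BUF);

    // Producer on the blocking pool: `blocking_send` parks this thread
    // whenever the channel is full, i.e. whenever the consumer is slow.
    let producer = tokio::task::spawn_blocking(move || {
        for item in 0..1_000u64 {
            if tx.blocking_send(item).is_err() {
                return; // receiver dropped: the subscriber went away
            }
        }
    });

    // Consumer: drains the channel and would forward items to the sink.
    let consumer = async {
        while let Some(item) = rx.recv().await {
            let _ = item;
        }
    };

    // Run both to completion; neither side's failure aborts the other.
    let _ = futures::future::join(producer, consumer).await;
}
```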
/// An API for archive RPC calls. pub struct Archive, Block: BlockT, Client> { @@ -79,13 +69,10 @@ pub struct Archive, Block: BlockT, Client> { client: Arc, /// Backend of the chain. backend: Arc, + /// Executor to spawn subscriptions. + executor: SubscriptionTaskExecutor, /// The hexadecimal encoded hash of the genesis block. genesis_hash: String, - /// The maximum number of items the `archive_storage` can return for a descendant query before - /// pagination is required. - storage_max_descendant_responses: usize, - /// The maximum number of queried items allowed for the `archive_storage` at a time. - storage_max_queried_items: usize, /// Phantom member to pin the block type. _phantom: PhantomData, } @@ -96,17 +83,10 @@ impl, Block: BlockT, Client> Archive { client: Arc, backend: Arc, genesis_hash: GenesisHash, - config: ArchiveConfig, + executor: SubscriptionTaskExecutor, ) -> Self { let genesis_hash = hex_string(&genesis_hash.as_ref()); - Self { - client, - backend, - genesis_hash, - storage_max_descendant_responses: config.max_descendant_responses, - storage_max_queried_items: config.max_queried_items, - _phantom: PhantomData, - } + Self { client, backend, executor, genesis_hash, _phantom: PhantomData } } } @@ -236,46 +216,157 @@ where fn archive_unstable_storage( &self, + pending: PendingSubscriptionSink, hash: Block::Hash, - items: Vec>, + items: Vec>, child_trie: Option, - ) -> RpcResult { - let items = items - .into_iter() - .map(|query| { - let key = StorageKey(parse_hex_param(query.key)?); - let pagination_start_key = query - .pagination_start_key - .map(|key| parse_hex_param(key).map(|key| StorageKey(key))) - .transpose()?; - - // Paginated start key is only supported - if pagination_start_key.is_some() && !query.query_type.is_descendant_query() { - return Err(ArchiveError::InvalidParam( - "Pagination start key is only supported for descendants queries" - .to_string(), - )) + ) { + let mut storage_client = + StorageSubscriptionClient::::new(self.client.clone()); + + let fut = async move { + let Ok(mut sink) = pending.accept().await.map(Subscription::from) else { return }; + + let items = match items + .into_iter() + .map(|query| { + let key = StorageKey(parse_hex_param(query.key)?); + Ok(StorageQuery { key, query_type: query.query_type }) + }) + .collect::, ArchiveError>>() + { + Ok(items) => items, + Err(error) => { + let _ = sink.send(&ArchiveStorageEvent::err(error.to_string())); + return + }, + }; + + let child_trie = child_trie.map(|child_trie| parse_hex_param(child_trie)).transpose(); + let child_trie = match child_trie { + Ok(child_trie) => child_trie.map(ChildInfo::new_default_from_vec), + Err(error) => { + let _ = sink.send(&ArchiveStorageEvent::err(error.to_string())); + return + }, + }; + + let (tx, mut rx) = tokio::sync::mpsc::channel(STORAGE_QUERY_BUF); + let storage_fut = storage_client.generate_events(hash, items, child_trie, tx); + + // We don't care about the return value of this join: + // - process_storage_events might encounter an error (if the client disconnected) + // - storage_fut might encounter an error while processing trie queries and + // the error is propagated via the sink.
+ let _ = futures::future::join(storage_fut, process_storage_events(&mut rx, &mut sink)) + .await; + }; + + self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); + } + + fn archive_unstable_storage_diff( + &self, + pending: PendingSubscriptionSink, + hash: Block::Hash, + items: Vec>, + previous_hash: Option, + ) { + let storage_client = ArchiveStorageDiff::new(self.client.clone()); + let client = self.client.clone(); + + log::trace!(target: LOG_TARGET, "Storage diff subscription started"); + + let fut = async move { + let Ok(mut sink) = pending.accept().await.map(Subscription::from) else { return }; + + let previous_hash = if let Some(previous_hash) = previous_hash { + previous_hash + } else { + let Ok(Some(current_header)) = client.header(hash) else { + let message = format!("Block header is not present: {hash}"); + let _ = sink.send(&ArchiveStorageDiffEvent::err(message)).await; + return + }; + *current_header.parent_hash() + }; + + let (tx, mut rx) = tokio::sync::mpsc::channel(STORAGE_QUERY_BUF); + let storage_fut = + storage_client.handle_trie_queries(hash, items, previous_hash, tx.clone()); + + // We don't care about the return value of this join: + // - process_storage_diff_events might encounter an error (if the client disconnected) + // - storage_fut might encounter an error while processing trie queries and + // the error is propagated via the sink. + let _ = + futures::future::join(storage_fut, process_storage_diff_events(&mut rx, &mut sink)) + .await; + }; + + self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); + } +} + +/// Sends all the events of the storage_diff method to the sink. +async fn process_storage_diff_events( + rx: &mut mpsc::Receiver, + sink: &mut Subscription, +) { + loop { + tokio::select! { + _ = sink.closed() => { + return + }, + + maybe_event = rx.recv() => { + let Some(event) = maybe_event else { + break; + }; + + if event.is_done() { + log::debug!(target: LOG_TARGET, "Finished processing partial trie query"); + } else if event.is_err() { + log::debug!(target: LOG_TARGET, "Error encountered while processing partial trie query"); } - Ok(PaginatedStorageQuery { - key, - query_type: query.query_type, - pagination_start_key, - }) - }) - .collect::, ArchiveError>>()?; + if sink.send(&event).await.is_err() { + return + } + } + } + } +} - let child_trie = child_trie - .map(|child_trie| parse_hex_param(child_trie)) - .transpose()? - .map(ChildInfo::new_default_from_vec); +/// Sends all the events of the storage method to the sink. +async fn process_storage_events(rx: &mut mpsc::Receiver, sink: &mut Subscription) { + loop { + tokio::select!
{ + _ = sink.closed() => { + break + } + + maybe_storage = rx.recv() => { + let Some(event) = maybe_storage else { + break; + }; - let storage_client = ArchiveStorage::new( - self.client.clone(), - self.storage_max_descendant_responses, - self.storage_max_queried_items, - ); + match event { + Ok(None) => continue, - Ok(storage_client.handle_query(hash, items, child_trie)) + Ok(Some(event)) => + if sink.send(&ArchiveStorageEvent::result(event)).await.is_err() { + return + }, + + Err(error) => { + let _ = sink.send(&ArchiveStorageEvent::err(error)).await; + return + } + } + } + } } + + let _ = sink.send(&ArchiveStorageEvent::StorageDone).await; } diff --git a/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs b/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs index 26e7c299de41..390db765a48f 100644 --- a/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs +++ b/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs @@ -18,112 +18,832 @@ //! Implementation of the `archive_storage` method. -use std::sync::Arc; +use std::{ + collections::{hash_map::Entry, HashMap}, + sync::Arc, +}; +use itertools::Itertools; use sc_client_api::{Backend, ChildInfo, StorageKey, StorageProvider}; use sp_runtime::traits::Block as BlockT; -use crate::common::{ - events::{ArchiveStorageResult, PaginatedStorageQuery, StorageQueryType}, - storage::{IterQueryType, QueryIter, Storage}, +use super::error::Error as ArchiveError; +use crate::{ + archive::archive::LOG_TARGET, + common::{ + events::{ + ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageDiffOperationType, + ArchiveStorageDiffResult, ArchiveStorageDiffType, StorageResult, + }, + storage::Storage, + }, }; +use tokio::sync::mpsc; + +/// Parse hex-encoded string parameter as raw bytes. +/// +/// If the parsing fails, returns an error propagated to the RPC method. +pub fn parse_hex_param(param: String) -> Result, ArchiveError> { + // Methods can accept empty parameters. + if param.is_empty() { + return Ok(Default::default()) + } + + array_bytes::hex2bytes(¶m).map_err(|_| ArchiveError::InvalidParam(param)) +} + +#[derive(Debug, PartialEq, Clone)] +pub struct DiffDetails { + key: StorageKey, + return_type: ArchiveStorageDiffType, + child_trie_key: Option, + child_trie_key_string: Option, +} + +/// The type of storage query. +#[derive(Debug, PartialEq, Clone, Copy)] +enum FetchStorageType { + /// Only fetch the value. + Value, + /// Only fetch the hash. + Hash, + /// Fetch both the value and the hash. + Both, +} -/// Generates the events of the `archive_storage` method. -pub struct ArchiveStorage { - /// Storage client. +/// The return value of the `fetch_storage` method. +#[derive(Debug, PartialEq, Clone)] +enum FetchedStorage { + /// Storage value under a key. + Value(StorageResult), + /// Storage hash under a key. + Hash(StorageResult), + /// Both storage value and hash under a key. + Both { value: StorageResult, hash: StorageResult }, +} + +pub struct ArchiveStorageDiff { client: Storage, - /// The maximum number of responses the API can return for a descendant query at a time. - storage_max_descendant_responses: usize, - /// The maximum number of queried items allowed for the `archive_storage` at a time. - storage_max_queried_items: usize, } -impl ArchiveStorage { - /// Constructs a new [`ArchiveStorage`]. 
- pub fn new( - client: Arc, - storage_max_descendant_responses: usize, - storage_max_queried_items: usize, - ) -> Self { - Self { - client: Storage::new(client), - storage_max_descendant_responses, - storage_max_queried_items, - } +impl ArchiveStorageDiff { + pub fn new(client: Arc) -> Self { + Self { client: Storage::new(client) } } } -impl ArchiveStorage +impl ArchiveStorageDiff where Block: BlockT + 'static, BE: Backend + 'static, - Client: StorageProvider + 'static, + Client: StorageProvider + Send + Sync + 'static, { - /// Generate the response of the `archive_storage` method. - pub fn handle_query( + /// Fetch the storage value and/or hash at the given key. + fn fetch_storage( &self, hash: Block::Hash, - mut items: Vec>, - child_key: Option, - ) -> ArchiveStorageResult { - let discarded_items = items.len().saturating_sub(self.storage_max_queried_items); - items.truncate(self.storage_max_queried_items); + key: StorageKey, + maybe_child_trie: Option, + ty: FetchStorageType, + ) -> Result, String> { + match ty { + FetchStorageType::Value => { + let result = self.client.query_value(hash, &key, maybe_child_trie.as_ref())?; + + Ok(result.map(FetchedStorage::Value)) + }, + + FetchStorageType::Hash => { + let result = self.client.query_hash(hash, &key, maybe_child_trie.as_ref())?; + + Ok(result.map(FetchedStorage::Hash)) + }, + + FetchStorageType::Both => { + let Some(value) = self.client.query_value(hash, &key, maybe_child_trie.as_ref())? + else { + return Ok(None); + }; + + let Some(hash) = self.client.query_hash(hash, &key, maybe_child_trie.as_ref())? + else { + return Ok(None); + }; + + Ok(Some(FetchedStorage::Both { value, hash })) + }, + } + } + + /// Check if the key belongs to the provided query items. + /// + /// A key belongs to the query items when: + /// - the key of a query item is a prefix of the provided key. + /// - the query items are empty. + /// + /// Returns an optional `FetchStorageType` based on the query items. + /// If the key does not belong to the query items, returns `None`. + fn belongs_to_query(key: &StorageKey, items: &[DiffDetails]) -> Option { + // User has requested all keys; by default this falls back to fetching the value. + if items.is_empty() { + return Some(FetchStorageType::Value) + } + + let mut value = false; + let mut hash = false; - let mut storage_results = Vec::with_capacity(items.len()); for item in items { - match item.query_type { - StorageQueryType::Value => { - match self.client.query_value(hash, &item.key, child_key.as_ref()) { - Ok(Some(value)) => storage_results.push(value), - Ok(None) => continue, - Err(error) => return ArchiveStorageResult::err(error), - } - }, - StorageQueryType::Hash => - match self.client.query_hash(hash, &item.key, child_key.as_ref()) { - Ok(Some(value)) => storage_results.push(value), - Ok(None) => continue, - Err(error) => return ArchiveStorageResult::err(error), - }, - StorageQueryType::ClosestDescendantMerkleValue => - match self.client.query_merkle_value(hash, &item.key, child_key.as_ref()) { - Ok(Some(value)) => storage_results.push(value), - Ok(None) => continue, - Err(error) => return ArchiveStorageResult::err(error), - }, - StorageQueryType::DescendantsValues => { - match self.client.query_iter_pagination( - QueryIter { - query_key: item.key, - ty: IterQueryType::Value, - pagination_start_key: item.pagination_start_key, - }, + if key.as_ref().starts_with(&item.key.as_ref()) { + match item.return_type { + ArchiveStorageDiffType::Value => value = true, + ArchiveStorageDiffType::Hash => hash = true, + } + } + } + + match (value, hash) { + (true, true) => Some(FetchStorageType::Both), + (true, false) => Some(FetchStorageType::Value), + (false, true) => Some(FetchStorageType::Hash), + (false, false) => None, + } + }
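`belongs_to_query` folds all matching query items into a single fetch decision: any item whose key prefixes the candidate key contributes its requested return type, and the value/hash flags collapse into `Value`, `Hash`, or `Both`. A standalone rendering of that rule with simplified stand-in types (not the crate's actual ones):

```rust
// Hedged sketch of the prefix/return-type folding performed above.
#[derive(Clone, Copy, PartialEq, Debug)]
enum Want { Value, Hash, Both }

// `items` entries are (key prefix, wants value, wants hash).
fn wanted(key: &[u8], items: &[(&[u8], bool, bool)]) -> Option<Want> {
    if items.is_empty() {
        return Some(Want::Value); // empty query set defaults to values
    }
    let (mut value, mut hash) = (false, false);
    for (prefix, v, h) in items.iter().copied() {
        if key.starts_with(prefix) {
            value |= v;
            hash |= h;
        }
    }
    match (value, hash) {
        (true, true) => Some(Want::Both),
        (true, false) => Some(Want::Value),
        (false, true) => Some(Want::Hash),
        (false, false) => None,
    }
}

fn main() {
    // ":A" asks for values, ":AA" for hashes; ":AAB" matches both prefixes.
    let items: &[(&[u8], bool, bool)] =
        &[(b":A".as_slice(), true, false), (b":AA".as_slice(), false, true)];
    assert_eq!(wanted(b":AAB", items), Some(Want::Both));
    assert_eq!(wanted(b":AB", items), Some(Want::Value));
    assert_eq!(wanted(b":B", items), None);
}
```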
+ + /// Send the provided result to the `tx` sender. + /// + /// Returns `false` if the sender has been closed. + fn send_result( + tx: &mpsc::Sender, + result: FetchedStorage, + operation_type: ArchiveStorageDiffOperationType, + child_trie_key: Option, + ) -> bool { + let items = match result { + FetchedStorage::Value(storage_result) | FetchedStorage::Hash(storage_result) => + vec![storage_result], + FetchedStorage::Both { value, hash } => vec![value, hash], + }; + + for item in items { + let res = ArchiveStorageDiffEvent::StorageDiff(ArchiveStorageDiffResult { + key: item.key, + result: item.result, + operation_type, + child_trie_key: child_trie_key.clone(), + }); + if tx.blocking_send(res).is_err() { + return false + } + } + + true + }
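`handle_trie_queries_inner`, just below, classifies every key as added, deleted, or (provisionally) modified, and a `Modified` result is only emitted once the values at the two blocks are confirmed to differ. The rule in miniature, using in-memory maps purely for brevity where the real code walks two sorted key iterators in lockstep (all names here are illustrative):

```rust
// Hedged sketch of the added/deleted/modified classification used below.
use std::collections::BTreeMap;

#[derive(Debug, PartialEq)]
enum Op { Added, Deleted, Modified }

fn classify(
    prev: &BTreeMap<&'static str, &'static str>,
    next: &BTreeMap<&'static str, &'static str>,
) -> Vec<(&'static str, Op)> {
    let mut out = Vec::new();
    for (key, value) in next {
        match prev.get(key) {
            None => out.push((*key, Op::Added)),
            Some(old) if old != value => out.push((*key, Op::Modified)),
            Some(_) => {} // present in both with equal values: not reported
        }
    }
    for key in prev.keys() {
        if !next.contains_key(key) {
            out.push((*key, Op::Deleted));
        }
    }
    out
}

fn main() {
    let prev = BTreeMap::from([(":A", "B"), (":AA", "BB")]);
    let next = BTreeMap::from([(":A", "11"), (":AAA", "222")]);
    assert_eq!(
        classify(&prev, &next),
        vec![(":A", Op::Modified), (":AAA", Op::Added), (":AA", Op::Deleted)]
    );
}
```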
+ + fn handle_trie_queries_inner( + &self, + hash: Block::Hash, + previous_hash: Block::Hash, + items: Vec, + tx: &mpsc::Sender, + ) -> Result<(), String> { + // Parse the child trie key as `ChildInfo` and `String`. + let maybe_child_trie = items.first().and_then(|item| item.child_trie_key.clone()); + let maybe_child_trie_str = + items.first().and_then(|item| item.child_trie_key_string.clone()); + + // Iterate over the current block and the previous block + // at the same time to compare the keys. This approach effectively + // leverages backpressure to avoid memory consumption. + let keys_iter = self.client.raw_keys_iter(hash, maybe_child_trie.clone())?; + let previous_keys_iter = + self.client.raw_keys_iter(previous_hash, maybe_child_trie.clone())?; + + let mut diff_iter = lexicographic_diff(keys_iter, previous_keys_iter); + + while let Some(item) = diff_iter.next() { + let (operation_type, key) = match item { + Diff::Added(key) => (ArchiveStorageDiffOperationType::Added, key), + Diff::Deleted(key) => (ArchiveStorageDiffOperationType::Deleted, key), + Diff::Equal(key) => (ArchiveStorageDiffOperationType::Modified, key), + }; + + let Some(fetch_type) = Self::belongs_to_query(&key, &items) else { + // The key does not belong to the query items. + continue; + }; + + let maybe_result = match operation_type { + ArchiveStorageDiffOperationType::Added => + self.fetch_storage(hash, key.clone(), maybe_child_trie.clone(), fetch_type)?, + ArchiveStorageDiffOperationType::Deleted => self.fetch_storage( + previous_hash, + key.clone(), + maybe_child_trie.clone(), + fetch_type, + )?, + ArchiveStorageDiffOperationType::Modified => { + let Some(storage_result) = self.fetch_storage( hash, - child_key.as_ref(), - self.storage_max_descendant_responses, - ) { - Ok((results, _)) => storage_results.extend(results), - Err(error) => return ArchiveStorageResult::err(error), + key.clone(), + maybe_child_trie.clone(), + fetch_type, + )? + else { + continue + }; + + let Some(previous_storage_result) = self.fetch_storage( + previous_hash, + key.clone(), + maybe_child_trie.clone(), + fetch_type, + )? + else { + continue + }; + + // For modified records we need to check the actual storage values. + if storage_result == previous_storage_result { + continue } + + Some(storage_result) }, - StorageQueryType::DescendantsHashes => { - match self.client.query_iter_pagination( - QueryIter { - query_key: item.key, - ty: IterQueryType::Hash, - pagination_start_key: item.pagination_start_key, - }, - hash, - child_key.as_ref(), - self.storage_max_descendant_responses, - ) { - Ok((results, _)) => storage_results.extend(results), - Err(error) => return ArchiveStorageResult::err(error), - } + }; + + if let Some(storage_result) = maybe_result { + if !Self::send_result( + &tx, + storage_result, + operation_type, + maybe_child_trie_str.clone(), + ) { + return Ok(()) + } + } + } + + Ok(()) + } + + /// This method will iterate over the keys of the main trie or a child trie and fetch the + /// given keys. The fetched results will be sent to the provided `tx` sender to leverage + /// the backpressure mechanism. + pub async fn handle_trie_queries( + &self, + hash: Block::Hash, + items: Vec>, + previous_hash: Block::Hash, + tx: mpsc::Sender, + ) -> Result<(), tokio::task::JoinError> { + let this = ArchiveStorageDiff { client: self.client.clone() }; + + tokio::task::spawn_blocking(move || { + // Deduplicate the items. + let mut trie_items = match deduplicate_storage_diff_items(items) { + Ok(items) => items, + Err(error) => { + let _ = tx.blocking_send(ArchiveStorageDiffEvent::err(error.to_string())); + return }, }; + // Default to using the main storage trie if no items are provided.
+ if trie_items.is_empty() { + trie_items.push(Vec::new()); + } + log::trace!(target: LOG_TARGET, "Storage diff deduplicated items: {:?}", trie_items); + + for items in trie_items { + log::trace!( + target: LOG_TARGET, + "handle_trie_queries: hash={:?}, previous_hash={:?}, items={:?}", + hash, + previous_hash, + items + ); + + let result = this.handle_trie_queries_inner(hash, previous_hash, items, &tx); + + if let Err(error) = result { + log::trace!( + target: LOG_TARGET, + "handle_trie_queries: sending error={:?}", + error, + ); + + let _ = tx.blocking_send(ArchiveStorageDiffEvent::err(error)); + + return + } else { + log::trace!( + target: LOG_TARGET, + "handle_trie_queries: sending storage diff done", + ); + } + } + + let _ = tx.blocking_send(ArchiveStorageDiffEvent::StorageDiffDone); + }) + .await?; + + Ok(()) + } +} + +/// The result of the `lexicographic_diff` method. +#[derive(Debug, PartialEq)] +enum Diff { + Added(T), + Deleted(T), + Equal(T), +} + +/// Compare two iterators lexicographically and return the differences. +fn lexicographic_diff( + mut left: LeftIter, + mut right: RightIter, +) -> impl Iterator> +where + T: Ord, + LeftIter: Iterator, + RightIter: Iterator, +{ + let mut a = left.next(); + let mut b = right.next(); + + core::iter::from_fn(move || match (a.take(), b.take()) { + (Some(a_value), Some(b_value)) => + if a_value < b_value { + b = Some(b_value); + a = left.next(); + + Some(Diff::Added(a_value)) + } else if a_value > b_value { + a = Some(a_value); + b = right.next(); + + Some(Diff::Deleted(b_value)) + } else { + a = left.next(); + b = right.next(); + + Some(Diff::Equal(a_value)) + }, + (Some(a_value), None) => { + a = left.next(); + Some(Diff::Added(a_value)) + }, + (None, Some(b_value)) => { + b = right.next(); + Some(Diff::Deleted(b_value)) + }, + (None, None) => None, + }) +} + +/// Deduplicate the provided items and return a list of `DiffDetails`. +/// +/// Each list corresponds to a single child trie or the main trie. +fn deduplicate_storage_diff_items( + items: Vec>, +) -> Result>, ArchiveError> { + let mut deduplicated: HashMap, Vec> = HashMap::new(); + + for diff_item in items { + // Ensure the provided hex keys are valid before deduplication. + let key = StorageKey(parse_hex_param(diff_item.key)?); + let child_trie_key_string = diff_item.child_trie_key.clone(); + let child_trie_key = diff_item + .child_trie_key + .map(|child_trie_key| parse_hex_param(child_trie_key)) + .transpose()? + .map(ChildInfo::new_default_from_vec); + + let diff_item = DiffDetails { + key, + return_type: diff_item.return_type, + child_trie_key: child_trie_key.clone(), + child_trie_key_string, + }; + + match deduplicated.entry(child_trie_key.clone()) { + Entry::Occupied(mut entry) => { + let mut should_insert = true; + + for existing in entry.get() { + // This points to a different return type. + if existing.return_type != diff_item.return_type { + continue + } + // Keys and return types are identical. + if existing.key == diff_item.key { + should_insert = false; + break + } + + // The following two conditions ensure that we keep the shortest key. + + // The current key is a longer prefix of the existing key. + if diff_item.key.as_ref().starts_with(&existing.key.as_ref()) { + should_insert = false; + break + } + + // The existing key is a longer prefix of the current key. + // We need to keep the current key and remove the existing one. 
+ if existing.key.as_ref().starts_with(&diff_item.key.as_ref()) { + let to_remove = existing.clone(); + entry.get_mut().retain(|item| item != &to_remove); + break; + } + } + + if should_insert { + entry.get_mut().push(diff_item); + } + }, + Entry::Vacant(entry) => { + entry.insert(vec![diff_item]); + }, } + } + + Ok(deduplicated + .into_iter() + .sorted_by_key(|(child_trie_key, _)| child_trie_key.clone()) + .map(|(_, values)| values) + .collect()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn dedup_empty() { + let items = vec![]; + let result = deduplicate_storage_diff_items(items).unwrap(); + assert!(result.is_empty()); + } + + #[test] + fn dedup_single() { + let items = vec![ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }]; + let result = deduplicate_storage_diff_items(items).unwrap(); + assert_eq!(result.len(), 1); + assert_eq!(result[0].len(), 1); + + let expected = DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + child_trie_key_string: None, + }; + assert_eq!(result[0][0], expected); + } + + #[test] + fn dedup_with_different_keys() { + let items = vec![ + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ArchiveStorageDiffItem { + key: "0x02".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ]; + let result = deduplicate_storage_diff_items(items).unwrap(); + assert_eq!(result.len(), 1); + assert_eq!(result[0].len(), 2); + + let expected = vec![ + DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + child_trie_key_string: None, + }, + DiffDetails { + key: StorageKey(vec![2]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + child_trie_key_string: None, + }, + ]; + assert_eq!(result[0], expected); + } + + #[test] + fn dedup_with_same_keys() { + // Identical keys. + let items = vec![ + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ]; + let result = deduplicate_storage_diff_items(items).unwrap(); + assert_eq!(result.len(), 1); + assert_eq!(result[0].len(), 1); + + let expected = vec![DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + child_trie_key_string: None, + }]; + assert_eq!(result[0], expected); + } + + #[test] + fn dedup_with_same_prefix() { + // Keys sharing a prefix: only the shorter key is kept.
+ let items = vec![ + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ArchiveStorageDiffItem { + key: "0x01ff".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ]; + let result = deduplicate_storage_diff_items(items).unwrap(); + assert_eq!(result.len(), 1); + assert_eq!(result[0].len(), 1); + + let expected = vec![DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + child_trie_key_string: None, + }]; + assert_eq!(result[0], expected); + } + + #[test] + fn dedup_with_different_return_types() { + let items = vec![ + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Hash, + child_trie_key: None, + }, + ]; + let result = deduplicate_storage_diff_items(items).unwrap(); + assert_eq!(result.len(), 1); + assert_eq!(result[0].len(), 2); + + let expected = vec![ + DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + child_trie_key_string: None, + }, + DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Hash, + child_trie_key: None, + child_trie_key_string: None, + }, + ]; + assert_eq!(result[0], expected); + } + + #[test] + fn dedup_with_different_child_tries() { + let items = vec![ + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some("0x01".into()), + }, + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some("0x02".into()), + }, + ]; + let result = deduplicate_storage_diff_items(items).unwrap(); + assert_eq!(result.len(), 2); + assert_eq!(result[0].len(), 1); + assert_eq!(result[1].len(), 1); + + let expected = vec![ + vec![DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some(ChildInfo::new_default_from_vec(vec![1])), + child_trie_key_string: Some("0x01".into()), + }], + vec![DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some(ChildInfo::new_default_from_vec(vec![2])), + child_trie_key_string: Some("0x02".into()), + }], + ]; + assert_eq!(result, expected); + } + + #[test] + fn dedup_with_same_child_tries() { + let items = vec![ + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some("0x01".into()), + }, + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some("0x01".into()), + }, + ]; + let result = deduplicate_storage_diff_items(items).unwrap(); + assert_eq!(result.len(), 1); + assert_eq!(result[0].len(), 1); + + let expected = vec![DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some(ChildInfo::new_default_from_vec(vec![1])), + child_trie_key_string: Some("0x01".into()), + }]; + assert_eq!(result[0], expected); + } + + #[test] + fn dedup_with_shorter_key_reverse_order() { + let items = vec![ + ArchiveStorageDiffItem { + key: "0x01ff".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ]; + let result = 
deduplicate_storage_diff_items(items).unwrap(); + assert_eq!(result.len(), 1); + assert_eq!(result[0].len(), 1); + + let expected = vec![DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + child_trie_key_string: None, + }]; + assert_eq!(result[0], expected); + } + + #[test] + fn dedup_multiple_child_tries() { + let items = vec![ + ArchiveStorageDiffItem { + key: "0x02".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some("0x01".into()), + }, + ArchiveStorageDiffItem { + key: "0x02".into(), + return_type: ArchiveStorageDiffType::Hash, + child_trie_key: Some("0x01".into()), + }, + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some("0x02".into()), + }, + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Hash, + child_trie_key: Some("0x02".into()), + }, + ArchiveStorageDiffItem { + key: "0x01ff".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some("0x02".into()), + }, + ]; + + let result = deduplicate_storage_diff_items(items).unwrap(); + + let expected = vec![ + vec![DiffDetails { + key: StorageKey(vec![2]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + child_trie_key_string: None, + }], + vec![ + DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some(ChildInfo::new_default_from_vec(vec![1])), + child_trie_key_string: Some("0x01".into()), + }, + DiffDetails { + key: StorageKey(vec![2]), + return_type: ArchiveStorageDiffType::Hash, + child_trie_key: Some(ChildInfo::new_default_from_vec(vec![1])), + child_trie_key_string: Some("0x01".into()), + }, + ], + vec![ + DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some(ChildInfo::new_default_from_vec(vec![2])), + child_trie_key_string: Some("0x02".into()), + }, + DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Hash, + child_trie_key: Some(ChildInfo::new_default_from_vec(vec![2])), + child_trie_key_string: Some("0x02".into()), + }, + ], + ]; + + assert_eq!(result, expected); + } + + #[test] + fn test_lexicographic_diff() { + let left = vec![1, 2, 3, 4, 5]; + let right = vec![2, 3, 4, 5, 6]; + + let diff = lexicographic_diff(left.into_iter(), right.into_iter()).collect::>(); + let expected = vec![ + Diff::Added(1), + Diff::Equal(2), + Diff::Equal(3), + Diff::Equal(4), + Diff::Equal(5), + Diff::Deleted(6), + ]; + assert_eq!(diff, expected); + } + + #[test] + fn test_lexicographic_diff_one_side_empty() { + let left = vec![]; + let right = vec![1, 2, 3, 4, 5, 6]; + + let diff = lexicographic_diff(left.into_iter(), right.into_iter()).collect::>(); + let expected = vec![ + Diff::Deleted(1), + Diff::Deleted(2), + Diff::Deleted(3), + Diff::Deleted(4), + Diff::Deleted(5), + Diff::Deleted(6), + ]; + assert_eq!(diff, expected); + + let left = vec![1, 2, 3, 4, 5, 6]; + let right = vec![]; - ArchiveStorageResult::ok(storage_results, discarded_items) + let diff = lexicographic_diff(left.into_iter(), right.into_iter()).collect::>(); + let expected = vec![ + Diff::Added(1), + Diff::Added(2), + Diff::Added(3), + Diff::Added(4), + Diff::Added(5), + Diff::Added(6), + ]; + assert_eq!(diff, expected); } } diff --git a/substrate/client/rpc-spec-v2/src/archive/mod.rs 
b/substrate/client/rpc-spec-v2/src/archive/mod.rs index 5f020c203eab..14fa104c113a 100644 --- a/substrate/client/rpc-spec-v2/src/archive/mod.rs +++ b/substrate/client/rpc-spec-v2/src/archive/mod.rs @@ -32,4 +32,4 @@ pub mod archive; pub mod error; pub use api::ArchiveApiServer; -pub use archive::{Archive, ArchiveConfig}; +pub use archive::Archive; diff --git a/substrate/client/rpc-spec-v2/src/archive/tests.rs b/substrate/client/rpc-spec-v2/src/archive/tests.rs index 078016f5b3e2..48cbbaa4934a 100644 --- a/substrate/client/rpc-spec-v2/src/archive/tests.rs +++ b/substrate/client/rpc-spec-v2/src/archive/tests.rs @@ -18,24 +18,25 @@ use crate::{ common::events::{ - ArchiveStorageMethodOk, ArchiveStorageResult, PaginatedStorageQuery, StorageQueryType, - StorageResultType, + ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageDiffOperationType, + ArchiveStorageDiffResult, ArchiveStorageDiffType, ArchiveStorageEvent, StorageQuery, + StorageQueryType, StorageResult, StorageResultType, }, hex_string, MethodResult, }; -use super::{ - archive::{Archive, ArchiveConfig}, - *, -}; +use super::{archive::Archive, *}; use assert_matches::assert_matches; use codec::{Decode, Encode}; use jsonrpsee::{ - core::EmptyServerParams as EmptyParams, rpc_params, MethodsError as Error, RpcModule, + core::{server::Subscription as RpcSubscription, EmptyServerParams as EmptyParams}, + rpc_params, MethodsError as Error, RpcModule, }; + use sc_block_builder::BlockBuilderBuilder; use sc_client_api::ChildInfo; +use sc_rpc::testing::TokioTestExecutor; use sp_blockchain::HeaderBackend; use sp_consensus::BlockOrigin; use sp_core::{Blake2Hasher, Hasher}; @@ -51,8 +52,6 @@ use substrate_test_runtime_client::{ const CHAIN_GENESIS: [u8; 32] = [0; 32]; const INVALID_HASH: [u8; 32] = [1; 32]; -const MAX_PAGINATION_LIMIT: usize = 5; -const MAX_QUERIED_LIMIT: usize = 5; const KEY: &[u8] = b":mock"; const VALUE: &[u8] = b"hello world"; const CHILD_STORAGE_KEY: &[u8] = b"child"; @@ -61,10 +60,7 @@ const CHILD_VALUE: &[u8] = b"child value"; type Header = substrate_test_runtime_client::runtime::Header; type Block = substrate_test_runtime_client::runtime::Block; -fn setup_api( - max_descendant_responses: usize, - max_queried_items: usize, -) -> (Arc>, RpcModule>>) { +fn setup_api() -> (Arc>, RpcModule>>) { let child_info = ChildInfo::new_default(CHILD_STORAGE_KEY); let builder = TestClientBuilder::new().add_extra_child_storage( &child_info, @@ -78,16 +74,25 @@ fn setup_api( client.clone(), backend, CHAIN_GENESIS, - ArchiveConfig { max_descendant_responses, max_queried_items }, + Arc::new(TokioTestExecutor::default()), ) .into_rpc(); (client, api) } +async fn get_next_event(sub: &mut RpcSubscription) -> T { + let (event, _sub_id) = tokio::time::timeout(std::time::Duration::from_secs(60), sub.next()) + .await + .unwrap() + .unwrap() + .unwrap(); + event +} + #[tokio::test] async fn archive_genesis() { - let (_client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (_client, api) = setup_api(); let genesis: String = api.call("archive_unstable_genesisHash", EmptyParams::new()).await.unwrap(); @@ -96,7 +101,7 @@ async fn archive_genesis() { #[tokio::test] async fn archive_body() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); // Invalid block hash. 
let invalid_hash = hex_string(&INVALID_HASH); @@ -112,8 +117,8 @@ async fn archive_body() { builder .push_transfer(runtime::Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 42, nonce: 0, }) @@ -130,7 +135,7 @@ async fn archive_body() { #[tokio::test] async fn archive_header() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); // Invalid block hash. let invalid_hash = hex_string(&INVALID_HASH); @@ -146,8 +151,8 @@ async fn archive_header() { builder .push_transfer(runtime::Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 42, nonce: 0, }) @@ -164,7 +169,7 @@ async fn archive_header() { #[tokio::test] async fn archive_finalized_height() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); let client_height: u32 = client.info().finalized_number.saturated_into(); @@ -176,7 +181,7 @@ async fn archive_finalized_height() { #[tokio::test] async fn archive_hash_by_height() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); // Genesis height. let hashes: Vec = api.call("archive_unstable_hashByHeight", [0]).await.unwrap(); @@ -244,8 +249,8 @@ async fn archive_hash_by_height() { // imported block_builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 41, nonce: 0, }) @@ -282,7 +287,7 @@ async fn archive_hash_by_height() { #[tokio::test] async fn archive_call() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); let invalid_hash = hex_string(&INVALID_HASH); // Invalid parameter (non-hex). @@ -325,7 +330,7 @@ async fn archive_call() { client.import(BlockOrigin::Own, block_1.clone()).await.unwrap(); // Valid call. - let alice_id = AccountKeyring::Alice.to_account_id(); + let alice_id = Sr25519Keyring::Alice.to_account_id(); // Hex encoded scale encoded bytes representing the call parameters. 
let call_parameters = hex_string(&alice_id.encode()); let result: MethodResult = api @@ -341,7 +346,7 @@ async fn archive_call() { #[tokio::test] async fn archive_storage_hashes_values() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); let block = BlockBuilderBuilder::new(&*client) .on_parent_block(client.chain_info().genesis_hash) @@ -355,42 +360,23 @@ async fn archive_storage_hashes_values() { let block_hash = format!("{:?}", block.header.hash()); let key = hex_string(&KEY); - let items: Vec> = vec![ - PaginatedStorageQuery { - key: key.clone(), - query_type: StorageQueryType::DescendantsHashes, - pagination_start_key: None, - }, - PaginatedStorageQuery { - key: key.clone(), - query_type: StorageQueryType::DescendantsValues, - pagination_start_key: None, - }, - PaginatedStorageQuery { - key: key.clone(), - query_type: StorageQueryType::Hash, - pagination_start_key: None, - }, - PaginatedStorageQuery { - key: key.clone(), - query_type: StorageQueryType::Value, - pagination_start_key: None, - }, + let items: Vec> = vec![ + StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsHashes }, + StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsValues }, + StorageQuery { key: key.clone(), query_type: StorageQueryType::Hash }, + StorageQuery { key: key.clone(), query_type: StorageQueryType::Value }, ]; - let result: ArchiveStorageResult = api - .call("archive_unstable_storage", rpc_params![&block_hash, items.clone()]) + let mut sub = api + .subscribe_unbounded("archive_unstable_storage", rpc_params![&block_hash, items.clone()]) .await .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - // Key has not been imported yet. - assert_eq!(result.len(), 0); - assert_eq!(discarded_items, 0); - }, - _ => panic!("Unexpected result"), - }; + // Key has not been imported yet. + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageDone, + ); // Import a block with the given key value pair. 
let mut builder = BlockBuilderBuilder::new(&*client) @@ -406,32 +392,103 @@ async fn archive_storage_hashes_values() { let expected_hash = format!("{:?}", Blake2Hasher::hash(&VALUE)); let expected_value = hex_string(&VALUE); - let result: ArchiveStorageResult = api - .call("archive_unstable_storage", rpc_params![&block_hash, items]) + let mut sub = api + .subscribe_unbounded("archive_unstable_storage", rpc_params![&block_hash, items]) .await .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 4); - assert_eq!(discarded_items, 0); - - assert_eq!(result[0].key, key); - assert_eq!(result[0].result, StorageResultType::Hash(expected_hash.clone())); - assert_eq!(result[1].key, key); - assert_eq!(result[1].result, StorageResultType::Value(expected_value.clone())); - assert_eq!(result[2].key, key); - assert_eq!(result[2].result, StorageResultType::Hash(expected_hash)); - assert_eq!(result[3].key, key); - assert_eq!(result[3].result, StorageResultType::Value(expected_value)); - }, - _ => panic!("Unexpected result"), - }; + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: key.clone(), + result: StorageResultType::Hash(expected_hash.clone()), + child_trie_key: None, + }), + ); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: key.clone(), + result: StorageResultType::Value(expected_value.clone()), + child_trie_key: None, + }), + ); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: key.clone(), + result: StorageResultType::Hash(expected_hash), + child_trie_key: None, + }), + ); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: key.clone(), + result: StorageResultType::Value(expected_value), + child_trie_key: None, + }), + ); + + assert_matches!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageDone + ); +} + +#[tokio::test] +async fn archive_storage_hashes_values_child_trie() { + let (client, api) = setup_api(); + + // Get child storage values set in `setup_api`. 
+ let child_info = hex_string(&CHILD_STORAGE_KEY); + let key = hex_string(&KEY); + let genesis_hash = format!("{:?}", client.genesis_hash()); + let expected_hash = format!("{:?}", Blake2Hasher::hash(&CHILD_VALUE)); + let expected_value = hex_string(&CHILD_VALUE); + + let items: Vec> = vec![ + StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsHashes }, + StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsValues }, + ]; + let mut sub = api + .subscribe_unbounded( + "archive_unstable_storage", + rpc_params![&genesis_hash, items, &child_info], + ) + .await + .unwrap(); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: key.clone(), + result: StorageResultType::Hash(expected_hash.clone()), + child_trie_key: Some(child_info.clone()), + }) + ); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: key.clone(), + result: StorageResultType::Value(expected_value.clone()), + child_trie_key: Some(child_info.clone()), + }) + ); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageDone, + ); } #[tokio::test] async fn archive_storage_closest_merkle_value() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); /// The core of this test. /// @@ -443,55 +500,47 @@ async fn archive_storage_closest_merkle_value() { api: &RpcModule>>, block_hash: String, ) -> HashMap { - let result: ArchiveStorageResult = api - .call( + let mut sub = api + .subscribe_unbounded( "archive_unstable_storage", rpc_params![ &block_hash, vec![ - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AAAA"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AAAB"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, // Key with descendant. - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":A"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AA"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, // Keys below this comment do not produce a result. // Key that exceed the keyspace of the trie. - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AAAAX"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AAABX"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, // Key that are not part of the trie. - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AAX"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AAAX"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, ] ], @@ -499,19 +548,21 @@ async fn archive_storage_closest_merkle_value() { .await .unwrap(); - let merkle_values: HashMap<_, _> = match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, .. 
}) => result - .into_iter() - .map(|res| { - let value = match res.result { + let mut merkle_values = HashMap::new(); + loop { + let event = get_next_event::(&mut sub).await; + match event { + ArchiveStorageEvent::Storage(result) => { + let str_result = match result.result { StorageResultType::ClosestDescendantMerkleValue(value) => value, - _ => panic!("Unexpected StorageResultType"), + _ => panic!("Unexpected result type"), }; - (res.key, value) - }) - .collect(), - _ => panic!("Unexpected result"), - }; + merkle_values.insert(result.key, str_result); + }, + ArchiveStorageEvent::StorageError(err) => panic!("Unexpected error {err:?}"), + ArchiveStorageEvent::StorageDone => break, + } + } // Response for AAAA, AAAB, A and AA. assert_eq!(merkle_values.len(), 4); @@ -590,9 +641,9 @@ async fn archive_storage_closest_merkle_value() { } #[tokio::test] -async fn archive_storage_paginate_iterations() { +async fn archive_storage_iterations() { // 1 iteration allowed before pagination kicks in. - let (client, api) = setup_api(1, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); // Import a new block with storage changes. let mut builder = BlockBuilderBuilder::new(&*client) @@ -611,230 +662,344 @@ async fn archive_storage_paginate_iterations() { // Calling with an invalid hash. let invalid_hash = hex_string(&INVALID_HASH); - let result: ArchiveStorageResult = api - .call( + let mut sub = api + .subscribe_unbounded( "archive_unstable_storage", rpc_params![ &invalid_hash, - vec![PaginatedStorageQuery { + vec![StorageQuery { key: hex_string(b":m"), query_type: StorageQueryType::DescendantsValues, - pagination_start_key: None, }] ], ) .await .unwrap(); - match result { - ArchiveStorageResult::Err(_) => (), - _ => panic!("Unexpected result"), - }; + + assert_matches!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageError(_) + ); // Valid call with storage at the key. - let result: ArchiveStorageResult = api - .call( + let mut sub = api + .subscribe_unbounded( "archive_unstable_storage", rpc_params![ &block_hash, - vec![PaginatedStorageQuery { + vec![StorageQuery { key: hex_string(b":m"), query_type: StorageQueryType::DescendantsValues, - pagination_start_key: None, }] ], ) .await .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 1); - assert_eq!(discarded_items, 0); - assert_eq!(result[0].key, hex_string(b":m")); - assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"a"))); - }, - _ => panic!("Unexpected result"), - }; + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: hex_string(b":m"), + result: StorageResultType::Value(hex_string(b"a")), + child_trie_key: None, + }) + ); - // Continue with pagination. 
- let result: ArchiveStorageResult = api - .call( - "archive_unstable_storage", - rpc_params![ - &block_hash, - vec![PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::DescendantsValues, - pagination_start_key: Some(hex_string(b":m")), - }] - ], - ) - .await - .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 1); - assert_eq!(discarded_items, 0); + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: hex_string(b":mo"), + result: StorageResultType::Value(hex_string(b"ab")), + child_trie_key: None, + }) + ); - assert_eq!(result[0].key, hex_string(b":mo")); - assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"ab"))); - }, - _ => panic!("Unexpected result"), - }; + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: hex_string(b":moD"), + result: StorageResultType::Value(hex_string(b"abcmoD")), + child_trie_key: None, + }) + ); - // Continue with pagination. - let result: ArchiveStorageResult = api - .call( - "archive_unstable_storage", - rpc_params![ - &block_hash, - vec![PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::DescendantsValues, - pagination_start_key: Some(hex_string(b":mo")), - }] - ], - ) - .await + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: hex_string(b":moc"), + result: StorageResultType::Value(hex_string(b"abc")), + child_trie_key: None, + }) + ); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: hex_string(b":mock"), + result: StorageResultType::Value(hex_string(b"abcd")), + child_trie_key: None, + }) + ); + + assert_matches!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageDone + ); +} + +#[tokio::test] +async fn archive_storage_diff_main_trie() { + let (client, api) = setup_api(); + + let mut builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 1); - assert_eq!(discarded_items, 0); + builder.push_storage_change(b":A".to_vec(), Some(b"B".to_vec())).unwrap(); + builder.push_storage_change(b":AA".to_vec(), Some(b"BB".to_vec())).unwrap(); + let prev_block = builder.build().unwrap().block; + let prev_hash = format!("{:?}", prev_block.header.hash()); + client.import(BlockOrigin::Own, prev_block.clone()).await.unwrap(); - assert_eq!(result[0].key, hex_string(b":moD")); - assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"abcmoD"))); - }, - _ => panic!("Unexpected result"), - }; + let mut builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(prev_block.hash()) + .with_parent_block_number(1) + .build() + .unwrap(); + builder.push_storage_change(b":A".to_vec(), Some(b"11".to_vec())).unwrap(); + builder.push_storage_change(b":AA".to_vec(), Some(b"22".to_vec())).unwrap(); + builder.push_storage_change(b":AAA".to_vec(), Some(b"222".to_vec())).unwrap(); + let block = builder.build().unwrap().block; + let block_hash = format!("{:?}", block.header.hash()); + client.import(BlockOrigin::Own, block.clone()).await.unwrap(); - // Continue with pagination. 
- let result: ArchiveStorageResult = api - .call( - "archive_unstable_storage", - rpc_params![ - &block_hash, - vec![PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::DescendantsValues, - pagination_start_key: Some(hex_string(b":moD")), - }] - ], + // Search for items in the main trie: + // - values of keys under ":A" + // - hashes of keys under ":AA" + let items = vec![ + ArchiveStorageDiffItem:: { + key: hex_string(b":A"), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ArchiveStorageDiffItem:: { + key: hex_string(b":AA"), + return_type: ArchiveStorageDiffType::Hash, + child_trie_key: None, + }, + ]; + let mut sub = api + .subscribe_unbounded( + "archive_unstable_storageDiff", + rpc_params![&block_hash, items.clone(), &prev_hash], ) .await .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 1); - assert_eq!(discarded_items, 0); - assert_eq!(result[0].key, hex_string(b":moc")); - assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"abc"))); - }, - _ => panic!("Unexpected result"), - }; + let event = get_next_event::(&mut sub).await; + assert_eq!( + ArchiveStorageDiffEvent::StorageDiff(ArchiveStorageDiffResult { + key: hex_string(b":A"), + result: StorageResultType::Value(hex_string(b"11")), + operation_type: ArchiveStorageDiffOperationType::Modified, + child_trie_key: None, + }), + event, + ); - // Continue with pagination. - let result: ArchiveStorageResult = api - .call( - "archive_unstable_storage", - rpc_params![ - &block_hash, - vec![PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::DescendantsValues, - pagination_start_key: Some(hex_string(b":moc")), - }] - ], - ) - .await + let event = get_next_event::(&mut sub).await; + assert_eq!( + ArchiveStorageDiffEvent::StorageDiff(ArchiveStorageDiffResult { + key: hex_string(b":AA"), + result: StorageResultType::Value(hex_string(b"22")), + operation_type: ArchiveStorageDiffOperationType::Modified, + child_trie_key: None, + }), + event, + ); + + let event = get_next_event::(&mut sub).await; + assert_eq!( + ArchiveStorageDiffEvent::StorageDiff(ArchiveStorageDiffResult { + key: hex_string(b":AA"), + result: StorageResultType::Hash(format!("{:?}", Blake2Hasher::hash(b"22"))), + operation_type: ArchiveStorageDiffOperationType::Modified, + child_trie_key: None, + }), + event, + ); + + // Added key. + let event = get_next_event::(&mut sub).await; + assert_eq!( + ArchiveStorageDiffEvent::StorageDiff(ArchiveStorageDiffResult { + key: hex_string(b":AAA"), + result: StorageResultType::Value(hex_string(b"222")), + operation_type: ArchiveStorageDiffOperationType::Added, + child_trie_key: None, + }), + event, + ); + + let event = get_next_event::(&mut sub).await; + assert_eq!( + ArchiveStorageDiffEvent::StorageDiff(ArchiveStorageDiffResult { + key: hex_string(b":AAA"), + result: StorageResultType::Hash(format!("{:?}", Blake2Hasher::hash(b"222"))), + operation_type: ArchiveStorageDiffOperationType::Added, + child_trie_key: None, + }), + event, + ); + + let event = get_next_event::(&mut sub).await; + assert_eq!(ArchiveStorageDiffEvent::StorageDiffDone, event); +} + +#[tokio::test] +async fn archive_storage_diff_no_changes() { + let (client, api) = setup_api(); + + // Build 2 identical blocks. 
+ let mut builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 1); - assert_eq!(discarded_items, 0); + builder.push_storage_change(b":A".to_vec(), Some(b"B".to_vec())).unwrap(); + builder.push_storage_change(b":AA".to_vec(), Some(b"BB".to_vec())).unwrap(); + builder.push_storage_change(b":B".to_vec(), Some(b"CC".to_vec())).unwrap(); + builder.push_storage_change(b":BA".to_vec(), Some(b"CC".to_vec())).unwrap(); + let prev_block = builder.build().unwrap().block; + let prev_hash = format!("{:?}", prev_block.header.hash()); + client.import(BlockOrigin::Own, prev_block.clone()).await.unwrap(); - assert_eq!(result[0].key, hex_string(b":mock")); - assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"abcd"))); - }, - _ => panic!("Unexpected result"), - }; + let mut builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(prev_block.hash()) + .with_parent_block_number(1) + .build() + .unwrap(); + builder.push_storage_change(b":A".to_vec(), Some(b"B".to_vec())).unwrap(); + builder.push_storage_change(b":AA".to_vec(), Some(b"BB".to_vec())).unwrap(); + let block = builder.build().unwrap().block; + let block_hash = format!("{:?}", block.header.hash()); + client.import(BlockOrigin::Own, block.clone()).await.unwrap(); - // Continue with pagination until no keys are returned. - let result: ArchiveStorageResult = api - .call( - "archive_unstable_storage", - rpc_params![ - &block_hash, - vec![PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::DescendantsValues, - pagination_start_key: Some(hex_string(b":mock")), - }] - ], + // Search for items in the main trie with keys prefixed with ":A". + let items = vec![ArchiveStorageDiffItem::<String> { + key: hex_string(b":A"), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }]; + let mut sub = api + .subscribe_unbounded( + "archive_unstable_storageDiff", + rpc_params![&block_hash, items.clone(), &prev_hash], ) .await .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 0); - assert_eq!(discarded_items, 0); - }, - _ => panic!("Unexpected result"), - }; + + let event = get_next_event::<ArchiveStorageDiffEvent>(&mut sub).await; + assert_eq!(ArchiveStorageDiffEvent::StorageDiffDone, event); } #[tokio::test] -async fn archive_storage_discarded_items() { - // One query at a time - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, 1); +async fn archive_storage_diff_deleted_changes() { + let (client, api) = setup_api(); - // Import a new block with storage changes. + // Blocks are imported as forks.
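+ // Both blocks are built on top of genesis; the extra transfer only gives the second fork a different hash. Keys written solely on the first fork (here ":AA") should therefore be reported as deleted by the diff below.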
let mut builder = BlockBuilderBuilder::new(&*client) .on_parent_block(client.chain_info().genesis_hash) .with_parent_block_number(0) .build() .unwrap(); - builder.push_storage_change(b":m".to_vec(), Some(b"a".to_vec())).unwrap(); + builder.push_storage_change(b":A".to_vec(), Some(b"B".to_vec())).unwrap(); + builder.push_storage_change(b":AA".to_vec(), Some(b"BB".to_vec())).unwrap(); + builder.push_storage_change(b":B".to_vec(), Some(b"CC".to_vec())).unwrap(); + builder.push_storage_change(b":BA".to_vec(), Some(b"CC".to_vec())).unwrap(); + let prev_block = builder.build().unwrap().block; + let prev_hash = format!("{:?}", prev_block.header.hash()); + client.import(BlockOrigin::Own, prev_block.clone()).await.unwrap(); + + let mut builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap(); + builder + .push_transfer(Transfer { + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), + amount: 41, + nonce: 0, + }) + .unwrap(); + builder.push_storage_change(b":A".to_vec(), Some(b"B".to_vec())).unwrap(); let block = builder.build().unwrap().block; let block_hash = format!("{:?}", block.header.hash()); client.import(BlockOrigin::Own, block.clone()).await.unwrap(); - // Valid call with storage at the key. - let result: ArchiveStorageResult = api - .call( - "archive_unstable_storage", - rpc_params![ - &block_hash, - vec![ - PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::Value, - pagination_start_key: None, - }, - PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::Hash, - pagination_start_key: None, - }, - PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::Hash, - pagination_start_key: None, - } - ] - ], + // Search for items in the main trie with keys prefixed with ":A". + let items = vec![ArchiveStorageDiffItem::<String> { + key: hex_string(b":A"), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }]; + + let mut sub = api + .subscribe_unbounded( + "archive_unstable_storageDiff", + rpc_params![&block_hash, items.clone(), &prev_hash], ) .await .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 1); - assert_eq!(discarded_items, 2); - assert_eq!(result[0].key, hex_string(b":m")); - assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"a"))); - }, - _ => panic!("Unexpected result"), - }; + let event = get_next_event::<ArchiveStorageDiffEvent>(&mut sub).await; + assert_eq!( + ArchiveStorageDiffEvent::StorageDiff(ArchiveStorageDiffResult { + key: hex_string(b":AA"), + result: StorageResultType::Value(hex_string(b"BB")), + operation_type: ArchiveStorageDiffOperationType::Deleted, + child_trie_key: None, + }), + event, + ); + + let event = get_next_event::<ArchiveStorageDiffEvent>(&mut sub).await; + assert_eq!(ArchiveStorageDiffEvent::StorageDiffDone, event); +} + +#[tokio::test] +async fn archive_storage_diff_invalid_params() { + let invalid_hash = hex_string(&INVALID_HASH); + let (_, api) = setup_api(); + + // Invalid shape for parameters.
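+ // "123" is not a valid block hash, so the parameters fail to decode and the server answers with an "Invalid params" JSON-RPC error instead of opening a subscription.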
+ let items: Vec<ArchiveStorageDiffItem<String>> = Vec::new(); + let err = api + .subscribe_unbounded( + "archive_unstable_storageDiff", + rpc_params!["123", items.clone(), &invalid_hash], + ) + .await + .unwrap_err(); + assert_matches!(err, + Error::JsonRpc(ref err) if err.code() == crate::chain_head::error::json_rpc_spec::INVALID_PARAM_ERROR && err.message() == "Invalid params" + ); + + // The shape is right, but the block hash is invalid. + let items: Vec<ArchiveStorageDiffItem<String>> = Vec::new(); + let mut sub = api + .subscribe_unbounded( + "archive_unstable_storageDiff", + rpc_params![&invalid_hash, items.clone(), &invalid_hash], + ) + .await + .unwrap(); + + let event = get_next_event::<ArchiveStorageDiffEvent>(&mut sub).await; + assert_matches!(event, + ArchiveStorageDiffEvent::StorageDiffError(ref err) if err.error.contains("Header was not found") + ); } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs index 61eb47d1b9ab..b949fb25402b 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs @@ -318,7 +318,7 @@ where }), }; - let (rp, rp_fut) = method_started_response(operation_id); + let (rp, rp_fut) = method_started_response(operation_id, None); let fut = async move { // Wait for the server to send out the response and if it produces an error no event // should be generated. @@ -432,7 +432,8 @@ where let mut storage_client = ChainHeadStorage::<Client, Block, BE>::new(self.client.clone()); - let (rp, rp_fut) = method_started_response(block_guard.operation().operation_id()); + // Storage items are never discarded. + let (rp, rp_fut) = method_started_response(block_guard.operation().operation_id(), Some(0)); let fut = async move { // Wait for the server to send out the response and if it produces an error no event @@ -507,7 +508,7 @@ where let operation_id = block_guard.operation().operation_id(); let client = self.client.clone(); - let (rp, rp_fut) = method_started_response(operation_id.clone()); + let (rp, rp_fut) = method_started_response(operation_id.clone(), None); let fut = async move { // Wait for the server to send out the response and if it produces an error no event // should be generated. @@ -630,8 +631,9 @@ where fn method_started_response( operation_id: String, + discarded_items: Option<usize>, ) -> (ResponsePayload<'static, MethodResponse>, MethodResponseFuture) { - let rp = MethodResponse::Started(MethodResponseStarted { operation_id, discarded_items: None }); + let rp = MethodResponse::Started(MethodResponseStarted { operation_id, discarded_items }); ResponsePayload::success(rp).notify_on_completion() } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/event.rs b/substrate/client/rpc-spec-v2/src/chain_head/event.rs index bd9863060910..de74145a3f08 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/event.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/event.rs @@ -235,7 +235,7 @@ pub struct OperationCallDone { pub output: String, } -/// The response of the `chainHead_call` method. +/// The response of the `chainHead_storage` method.
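+/// Carries a batch of storage results produced by an ongoing `chainHead_storage` operation.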
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct OperationStorageItems { @@ -536,6 +536,7 @@ mod tests { items: vec![StorageResult { key: "0x1".into(), result: StorageResultType::Value("0x123".to_string()), + child_trie_key: None, }], }); diff --git a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs index 95a7c7fe1832..3e1bd23776d3 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs @@ -784,7 +784,7 @@ mod tests { use super::*; use jsonrpsee::ConnectionId; use sc_block_builder::BlockBuilderBuilder; - use sc_service::client::new_in_mem; + use sc_service::client::new_with_backend; use sp_consensus::BlockOrigin; use sp_core::{testing::TaskExecutor, H256}; use substrate_test_runtime_client::{ @@ -811,13 +811,13 @@ mod tests { ) .unwrap(); let client = Arc::new( - new_in_mem::<_, Block, _, RuntimeApi>( + new_with_backend::<_, _, Block, _, RuntimeApi>( backend.clone(), executor, genesis_block_builder, + Box::new(TaskExecutor::new()), None, None, - Box::new(TaskExecutor::new()), client_config, ) .unwrap(), diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index c505566d887d..3ec5e805ecd5 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -34,7 +34,7 @@ use jsonrpsee::{ use sc_block_builder::BlockBuilderBuilder; use sc_client_api::ChildInfo; use sc_rpc::testing::TokioTestExecutor; -use sc_service::client::new_in_mem; +use sc_service::client::new_with_backend; use sp_blockchain::HeaderBackend; use sp_consensus::BlockOrigin; use sp_core::{ @@ -506,8 +506,8 @@ async fn get_body() { .unwrap(); builder .push_transfer(runtime::Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 42, nonce: 0, }) @@ -580,7 +580,7 @@ async fn call_runtime() { ); // Valid call. - let alice_id = AccountKeyring::Alice.to_account_id(); + let alice_id = Sr25519Keyring::Alice.to_account_id(); // Hex encoded scale encoded bytes representing the call parameters. let call_parameters = hex_string(&alice_id.encode()); let response: MethodResponse = api @@ -670,7 +670,7 @@ async fn call_runtime_without_flag() { ); // Valid runtime call on a subscription started with `with_runtime` false. - let alice_id = AccountKeyring::Alice.to_account_id(); + let alice_id = Sr25519Keyring::Alice.to_account_id(); let call_parameters = hex_string(&alice_id.encode()); let err = api .call::<_, serde_json::Value>( @@ -1256,7 +1256,7 @@ async fn unique_operation_ids() { assert!(op_ids.insert(operation_id)); // Valid `chainHead_v1_call` call. 
- let alice_id = AccountKeyring::Alice.to_account_id(); + let alice_id = Sr25519Keyring::Alice.to_account_id(); let call_parameters = hex_string(&alice_id.encode()); let response: MethodResponse = api .call( @@ -1423,8 +1423,8 @@ async fn follow_generates_initial_blocks() { // imported block_builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 41, nonce: 0, }) @@ -2046,8 +2046,8 @@ async fn follow_prune_best_block() { // imported block_builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 41, nonce: 0, }) @@ -2217,8 +2217,8 @@ async fn follow_forks_pruned_block() { // imported block_builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 41, nonce: 0, }) @@ -2233,8 +2233,8 @@ async fn follow_forks_pruned_block() { .unwrap(); block_builder .push_transfer(Transfer { - from: AccountKeyring::Bob.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Bob.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 41, nonce: 0, }) @@ -2379,8 +2379,8 @@ async fn follow_report_multiple_pruned_block() { // imported block_builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 41, nonce: 0, }) @@ -2397,8 +2397,8 @@ async fn follow_report_multiple_pruned_block() { block_builder .push_transfer(Transfer { - from: AccountKeyring::Bob.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Bob.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 41, nonce: 0, }) @@ -2547,13 +2547,13 @@ async fn pin_block_references() { .unwrap(); let client = Arc::new( - new_in_mem::<_, Block, _, RuntimeApi>( + new_with_backend::<_, _, Block, _, RuntimeApi>( backend.clone(), executor, genesis_block_builder, + Box::new(TokioTestExecutor::default()), None, None, - Box::new(TokioTestExecutor::default()), client_config, ) .unwrap(), @@ -2871,7 +2871,7 @@ async fn ensure_operation_limits_works() { let operation_id = match response { MethodResponse::Started(started) => { // Check discarded items. - assert!(started.discarded_items.is_none()); + assert_eq!(started.discarded_items, Some(0)); started.operation_id }, MethodResponse::LimitReached => panic!("Expected started response"), @@ -2883,7 +2883,7 @@ async fn ensure_operation_limits_works() { ); // The storage is finished and capacity must be released. - let alice_id = AccountKeyring::Alice.to_account_id(); + let alice_id = Sr25519Keyring::Alice.to_account_id(); // Hex encoded scale encoded bytes representing the call parameters. 
let call_parameters = hex_string(&alice_id.encode()); let response: MethodResponse = api @@ -3228,7 +3238,10 @@ async fn storage_closest_merkle_value() { .await .unwrap(); let operation_id = match response { - MethodResponse::Started(started) => started.operation_id, + MethodResponse::Started(started) => { + assert_eq!(started.discarded_items, Some(0)); + started.operation_id + }, MethodResponse::LimitReached => panic!("Expected started response"), }; @@ -3534,7 +3537,7 @@ async fn chain_head_single_connection_context() { .unwrap(); assert_matches!(response, MethodResponse::LimitReached); - let alice_id = AccountKeyring::Alice.to_account_id(); + let alice_id = Sr25519Keyring::Alice.to_account_id(); // Hex encoded scale encoded bytes representing the call parameters. let call_parameters = hex_string(&alice_id.encode()); let response: MethodResponse = ChainHeadApiClient::::chain_head_unstable_call( @@ -3660,8 +3663,8 @@ async fn follow_unique_pruned_blocks() { let block_6_hash = import_block(client.clone(), block_2_f_hash, 2).await.hash(); // Import block 2 as best on the fork. let mut tx_alice_ferdie = Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 41, nonce: 0, }; @@ -3843,8 +3846,8 @@ async fn follow_report_best_block_of_a_known_block() { // imported block_builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 41, nonce: 0, }) diff --git a/substrate/client/rpc-spec-v2/src/common/events.rs b/substrate/client/rpc-spec-v2/src/common/events.rs index b1627d74c844..44f722c0c61b 100644 --- a/substrate/client/rpc-spec-v2/src/common/events.rs +++ b/substrate/client/rpc-spec-v2/src/common/events.rs @@ -78,10 +78,14 @@ pub struct StorageResult { /// The result of the query. #[serde(flatten)] pub result: StorageResultType, + /// The child trie key if provided. + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default)] + pub child_trie_key: Option<String>, } /// The type of the storage query. -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub enum StorageResultType { /// Fetch the value of the provided key. @@ -105,23 +109,41 @@ pub struct StorageResultErr { /// The result of a storage call. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -#[serde(untagged)] -pub enum ArchiveStorageResult { +#[serde(rename_all = "camelCase")] +#[serde(tag = "event")] +pub enum ArchiveStorageEvent { /// Query generated a result. - Ok(ArchiveStorageMethodOk), + Storage(StorageResult), /// Query encountered an error. - Err(ArchiveStorageMethodErr), + StorageError(ArchiveStorageMethodErr), + /// Operation storage is done. + StorageDone, } -impl ArchiveStorageResult { - /// Create a new `ArchiveStorageResult::Ok` result. - pub fn ok(result: Vec<StorageResult>, discarded_items: usize) -> Self { - Self::Ok(ArchiveStorageMethodOk { result, discarded_items }) +impl ArchiveStorageEvent { + /// Create a new `ArchiveStorageEvent::StorageError` event. + pub fn err(error: String) -> Self { + Self::StorageError(ArchiveStorageMethodErr { error }) } - /// Create a new `ArchiveStorageResult::Err` result. - pub fn err(error: String) -> Self { - Self::Err(ArchiveStorageMethodErr { error }) + /// Create a new `ArchiveStorageEvent::Storage` event.
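+ /// Equivalent to constructing `Self::Storage(result)` directly.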
+ pub fn result(result: StorageResult) -> Self { + Self::Storage(result) + } + + /// Checks if the event is a `StorageDone` event. + pub fn is_done(&self) -> bool { + matches!(self, Self::StorageDone) + } + + /// Checks if the event is a `StorageError` event. + pub fn is_err(&self) -> bool { + matches!(self, Self::StorageError(_)) + } + + /// Checks if the event is a `Storage` event. + pub fn is_result(&self) -> bool { + matches!(self, Self::Storage(_)) } } @@ -136,22 +158,229 @@ pub struct ArchiveStorageMethodOk { } /// The error of a storage call. -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct ArchiveStorageMethodErr { /// Reported error. pub error: String, } +/// The type of the archive storage difference query. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub enum ArchiveStorageDiffType { + /// The result is provided as the value of the key. + Value, + /// The result is the hash of the value of the key. + Hash, +} + +/// The storage item to query. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ArchiveStorageDiffItem<Key> { + /// The provided key. + pub key: Key, + /// The type of the storage query. + pub return_type: ArchiveStorageDiffType, + /// The child trie key if provided. + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default)] + pub child_trie_key: Option<Key>, +} + +/// The result of a storage difference call. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ArchiveStorageDiffMethodResult { + /// Reported results. + pub result: Vec<ArchiveStorageDiffResult>, +} + +/// The operation type of a storage difference result. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub enum ArchiveStorageDiffOperationType { + /// The key is added. + Added, + /// The key is modified. + Modified, + /// The key is removed. + Deleted, +} + +/// The result of an individual storage difference key. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ArchiveStorageDiffResult { + /// The hex-encoded key of the result. + pub key: String, + /// The result of the query. + #[serde(flatten)] + pub result: StorageResultType, + /// The operation type. + #[serde(rename = "type")] + pub operation_type: ArchiveStorageDiffOperationType, + /// The child trie key if provided. + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default)] + pub child_trie_key: Option<String>, +} + +/// The event generated by the `archive_storageDiff` method. +/// +/// The `archive_storageDiff` can generate the following events: +/// - `storageDiff` event - generated when an `ArchiveStorageDiffResult` is produced. +/// - `storageDiffError` event - generated when an error is produced. +/// - `storageDiffDone` event - generated when the `archive_storageDiff` method has completed. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +#[serde(tag = "event")] +pub enum ArchiveStorageDiffEvent { + /// The `storageDiff` event. + StorageDiff(ArchiveStorageDiffResult), + /// The `storageDiffError` event. + StorageDiffError(ArchiveStorageMethodErr), + /// The `storageDiffDone` event.
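+ /// Serialized as `{"event":"storageDiffDone"}` under the enum's `event` tag.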
+ StorageDiffDone, +} + +impl ArchiveStorageDiffEvent { + /// Create a new `ArchiveStorageDiffEvent::StorageDiffError` event. + pub fn err(error: String) -> Self { + Self::StorageDiffError(ArchiveStorageMethodErr { error }) + } + + /// Checks if the event is a `StorageDiffDone` event. + pub fn is_done(&self) -> bool { + matches!(self, Self::StorageDiffDone) + } + + /// Checks if the event is a `StorageDiffError` event. + pub fn is_err(&self) -> bool { + matches!(self, Self::StorageDiffError(_)) + } +} + #[cfg(test)] mod tests { use super::*; + #[test] + fn archive_diff_input() { + // Item with Value. + let item = ArchiveStorageDiffItem { + key: "0x1", + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }; + // Encode + let ser = serde_json::to_string(&item).unwrap(); + let exp = r#"{"key":"0x1","returnType":"value"}"#; + assert_eq!(ser, exp); + // Decode + let dec: ArchiveStorageDiffItem<&str> = serde_json::from_str(exp).unwrap(); + assert_eq!(dec, item); + + // Item with Hash. + let item = ArchiveStorageDiffItem { + key: "0x1", + return_type: ArchiveStorageDiffType::Hash, + child_trie_key: None, + }; + // Encode + let ser = serde_json::to_string(&item).unwrap(); + let exp = r#"{"key":"0x1","returnType":"hash"}"#; + assert_eq!(ser, exp); + // Decode + let dec: ArchiveStorageDiffItem<&str> = serde_json::from_str(exp).unwrap(); + assert_eq!(dec, item); + + // Item with Value and child trie key. + let item = ArchiveStorageDiffItem { + key: "0x1", + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some("0x2"), + }; + // Encode + let ser = serde_json::to_string(&item).unwrap(); + let exp = r#"{"key":"0x1","returnType":"value","childTrieKey":"0x2"}"#; + assert_eq!(ser, exp); + // Decode + let dec: ArchiveStorageDiffItem<&str> = serde_json::from_str(exp).unwrap(); + assert_eq!(dec, item); + + // Item with Hash and child trie key. + let item = ArchiveStorageDiffItem { + key: "0x1", + return_type: ArchiveStorageDiffType::Hash, + child_trie_key: Some("0x2"), + }; + // Encode + let ser = serde_json::to_string(&item).unwrap(); + let exp = r#"{"key":"0x1","returnType":"hash","childTrieKey":"0x2"}"#; + assert_eq!(ser, exp); + // Decode + let dec: ArchiveStorageDiffItem<&str> = serde_json::from_str(exp).unwrap(); + assert_eq!(dec, item); + } + + #[test] + fn archive_diff_output() { + // Item with Value. + let item = ArchiveStorageDiffResult { + key: "0x1".into(), + result: StorageResultType::Value("res".into()), + operation_type: ArchiveStorageDiffOperationType::Added, + child_trie_key: None, + }; + // Encode + let ser = serde_json::to_string(&item).unwrap(); + let exp = r#"{"key":"0x1","value":"res","type":"added"}"#; + assert_eq!(ser, exp); + // Decode + let dec: ArchiveStorageDiffResult = serde_json::from_str(exp).unwrap(); + assert_eq!(dec, item); + + // Item with Hash. + let item = ArchiveStorageDiffResult { + key: "0x1".into(), + result: StorageResultType::Hash("res".into()), + operation_type: ArchiveStorageDiffOperationType::Modified, + child_trie_key: None, + }; + // Encode + let ser = serde_json::to_string(&item).unwrap(); + let exp = r#"{"key":"0x1","hash":"res","type":"modified"}"#; + assert_eq!(ser, exp); + // Decode + let dec: ArchiveStorageDiffResult = serde_json::from_str(exp).unwrap(); + assert_eq!(dec, item); + + // Item with Hash, child trie key and removed. 
+ let item = ArchiveStorageDiffResult { + key: "0x1".into(), + result: StorageResultType::Hash("res".into()), + operation_type: ArchiveStorageDiffOperationType::Deleted, + child_trie_key: Some("0x2".into()), + }; + // Encode + let ser = serde_json::to_string(&item).unwrap(); + let exp = r#"{"key":"0x1","hash":"res","type":"deleted","childTrieKey":"0x2"}"#; + assert_eq!(ser, exp); + // Decode + let dec: ArchiveStorageDiffResult = serde_json::from_str(exp).unwrap(); + assert_eq!(dec, item); + } + #[test] fn storage_result() { // Item with Value. - let item = - StorageResult { key: "0x1".into(), result: StorageResultType::Value("res".into()) }; + let item = StorageResult { + key: "0x1".into(), + result: StorageResultType::Value("res".into()), + child_trie_key: None, + }; // Encode let ser = serde_json::to_string(&item).unwrap(); let exp = r#"{"key":"0x1","value":"res"}"#; @@ -161,8 +390,11 @@ mod tests { assert_eq!(dec, item); // Item with Hash. - let item = - StorageResult { key: "0x1".into(), result: StorageResultType::Hash("res".into()) }; + let item = StorageResult { + key: "0x1".into(), + result: StorageResultType::Hash("res".into()), + child_trie_key: None, + }; // Encode let ser = serde_json::to_string(&item).unwrap(); let exp = r#"{"key":"0x1","hash":"res"}"#; @@ -175,6 +407,7 @@ let item = StorageResult { key: "0x1".into(), result: StorageResultType::ClosestDescendantMerkleValue("res".into()), + child_trie_key: None, }; // Encode let ser = serde_json::to_string(&item).unwrap(); diff --git a/substrate/client/rpc-spec-v2/src/common/storage.rs b/substrate/client/rpc-spec-v2/src/common/storage.rs index 2e24a8da8ca8..a1e34d51530e 100644 --- a/substrate/client/rpc-spec-v2/src/common/storage.rs +++ b/substrate/client/rpc-spec-v2/src/common/storage.rs @@ -24,7 +24,7 @@ use sc_client_api::{Backend, ChildInfo, StorageKey, StorageProvider}; use sp_runtime::traits::Block as BlockT; use tokio::sync::mpsc; -use super::events::{StorageResult, StorageResultType}; +use super::events::{StorageQuery, StorageQueryType, StorageResult, StorageResultType}; use crate::hex_string; /// Call into the storage of blocks. @@ -70,9 +70,6 @@ pub enum IterQueryType { /// The result of making a query call. pub type QueryResult = Result<Option<StorageResult>, String>; -/// The result of iterating over keys. -pub type QueryIterResult = Result<(Vec<StorageResult>, Option<QueryIter>), String>; - impl<Client, Block, BE> Storage<Client, Block, BE> where Block: BlockT + 'static, @@ -97,6 +94,7 @@ where QueryResult::Ok(opt.map(|storage_data| StorageResult { key: hex_string(&key.0), result: StorageResultType::Value(hex_string(&storage_data.0)), + child_trie_key: child_key.map(|c| hex_string(&c.storage_key())), })) }) .unwrap_or_else(|error| QueryResult::Err(error.to_string())) @@ -120,6 +118,7 @@ where QueryResult::Ok(opt.map(|storage_data| StorageResult { key: hex_string(&key.0), result: StorageResultType::Hash(hex_string(&storage_data.as_ref())), + child_trie_key: child_key.map(|c| hex_string(&c.storage_key())), })) }) .unwrap_or_else(|error| QueryResult::Err(error.to_string())) @@ -149,6 +148,7 @@ where StorageResult { key: hex_string(&key.0), result: StorageResultType::ClosestDescendantMerkleValue(result), + child_trie_key: child_key.map(|c| hex_string(&c.storage_key())), } })) }) @@ -199,53 +199,111 @@ where } } - /// Iterate over at most the provided number of keys. - /// - /// Returns the storage result with a potential next key to resume iteration. - pub fn query_iter_pagination(
+ pub fn raw_keys_iter( &self, - query: QueryIter, hash: Block::Hash, - child_key: Option<&ChildInfo>, - count: usize, - ) -> QueryIterResult { - let QueryIter { ty, query_key, pagination_start_key } = query; - - let mut keys_iter = if let Some(child_key) = child_key { - self.client.child_storage_keys( - hash, - child_key.to_owned(), - Some(&query_key), - pagination_start_key.as_ref(), - ) + child_key: Option<ChildInfo>, + ) -> Result<impl Iterator<Item = StorageKey>, String> { + let keys_iter = if let Some(child_key) = child_key { + self.client.child_storage_keys(hash, child_key, None, None) } else { - self.client.storage_keys(hash, Some(&query_key), pagination_start_key.as_ref()) - } - .map_err(|err| err.to_string())?; + self.client.storage_keys(hash, None, None) + }; + + keys_iter.map_err(|err| err.to_string()) + } +} - let mut ret = Vec::with_capacity(count); - let mut next_pagination_key = None; - for _ in 0..count { - let Some(key) = keys_iter.next() else { break }; +/// Generates storage events for `chainHead_storage` and `archive_storage` subscriptions. +pub struct StorageSubscriptionClient<Client, Block, BE> { + /// Storage client. + client: Storage<Client, Block, BE>, + _phandom: PhantomData<(BE, Block)>, +} - next_pagination_key = Some(key.clone()); +impl<Client, Block, BE> Clone for StorageSubscriptionClient<Client, Block, BE> { + fn clone(&self) -> Self { + Self { client: self.client.clone(), _phandom: PhantomData } + } +} - let result = match ty { - IterQueryType::Value => self.query_value(hash, &key, child_key), - IterQueryType::Hash => self.query_hash(hash, &key, child_key), - }?; +impl<Client, Block, BE> StorageSubscriptionClient<Client, Block, BE> { + /// Constructs a new [`StorageSubscriptionClient`]. + pub fn new(client: Arc<Client>) -> Self { + Self { client: Storage::new(client), _phandom: PhantomData } + } +} - if let Some(value) = result { - ret.push(value); +impl<Client, Block, BE> StorageSubscriptionClient<Client, Block, BE> +where + Block: BlockT + 'static, + BE: Backend<Block> + 'static, + Client: StorageProvider<Block, BE> + Send + Sync + 'static, +{ + /// Generate storage events to the provided sender. + pub async fn generate_events( + &mut self, + hash: Block::Hash, + items: Vec<StorageQuery<StorageKey>>, + child_key: Option<ChildInfo>, + tx: mpsc::Sender<QueryResult>, + ) -> Result<(), tokio::task::JoinError> { + let this = self.clone(); + + tokio::task::spawn_blocking(move || { + for item in items { + match item.query_type { + StorageQueryType::Value => { + let rp = this.client.query_value(hash, &item.key, child_key.as_ref()); + if tx.blocking_send(rp).is_err() { + break; + } + }, + StorageQueryType::Hash => { + let rp = this.client.query_hash(hash, &item.key, child_key.as_ref()); + if tx.blocking_send(rp).is_err() { + break; + } + }, + StorageQueryType::ClosestDescendantMerkleValue => { + let rp = + this.client.query_merkle_value(hash, &item.key, child_key.as_ref()); + if tx.blocking_send(rp).is_err() { + break; + } + }, + StorageQueryType::DescendantsValues => { + let query = QueryIter { + query_key: item.key, + ty: IterQueryType::Value, + pagination_start_key: None, + }; + this.client.query_iter_pagination_with_producer( + query, + hash, + child_key.as_ref(), + &tx, + ) + }, + StorageQueryType::DescendantsHashes => { + let query = QueryIter { + query_key: item.key, + ty: IterQueryType::Hash, + pagination_start_key: None, + }; + this.client.query_iter_pagination_with_producer( + query, + hash, + child_key.as_ref(), + &tx, + ) + }, + } } - } + }) + .await?; - // Save the next key if any to continue the iteration.
- let maybe_next_query = keys_iter.next().map(|_| QueryIter { - ty, - query_key, - pagination_start_key: next_pagination_key, - }); - Ok((ret, maybe_next_query)) + Ok(()) } } diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs index efb3bd94ddbf..c2f11878e8fc 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs @@ -23,7 +23,7 @@ use jsonrpsee::{rpc_params, MethodsError as Error}; use sc_transaction_pool::{Options, PoolLimit}; use sc_transaction_pool_api::{ChainEvent, MaintainedTransactionPool, TransactionPool}; use std::sync::Arc; -use substrate_test_runtime_client::AccountKeyring::*; +use substrate_test_runtime_client::Sr25519Keyring::*; use substrate_test_runtime_transaction_pool::uxt; const MAX_TX_PER_CONNECTION: usize = 4; diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_tests.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_tests.rs index 53c5b8ce3895..879d51eaf5f3 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_tests.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_tests.rs @@ -26,7 +26,7 @@ use jsonrpsee::rpc_params; use sc_transaction_pool_api::{ChainEvent, MaintainedTransactionPool}; use sp_core::H256; use std::{sync::Arc, vec}; -use substrate_test_runtime_client::AccountKeyring::*; +use substrate_test_runtime_client::Sr25519Keyring::*; use substrate_test_runtime_transaction_pool::uxt; // Test helpers. diff --git a/substrate/client/rpc/src/author/tests.rs b/substrate/client/rpc/src/author/tests.rs index ab0b8bdab699..b1c899667624 100644 --- a/substrate/client/rpc/src/author/tests.rs +++ b/substrate/client/rpc/src/author/tests.rs @@ -39,15 +39,15 @@ use std::sync::Arc; use substrate_test_runtime_client::{ self, runtime::{Block, Extrinsic, ExtrinsicBuilder, SessionKeys, Transfer}, - AccountKeyring, Backend, Client, DefaultTestClientBuilderExt, TestClientBuilderExt, + Backend, Client, DefaultTestClientBuilderExt, Sr25519Keyring, TestClientBuilderExt, }; -fn uxt(sender: AccountKeyring, nonce: u64) -> Extrinsic { +fn uxt(sender: Sr25519Keyring, nonce: u64) -> Extrinsic { let tx = Transfer { amount: Default::default(), nonce, from: sender.into(), - to: AccountKeyring::Bob.into(), + to: Sr25519Keyring::Bob.into(), }; ExtrinsicBuilder::new_transfer(tx).build() } @@ -99,7 +99,7 @@ impl TestSetup { async fn author_submit_transaction_should_not_cause_error() { let api = TestSetup::into_rpc(); - let xt: Bytes = uxt(AccountKeyring::Alice, 1).encode().into(); + let xt: Bytes = uxt(Sr25519Keyring::Alice, 1).encode().into(); let extrinsic_hash: H256 = blake2_256(&xt).into(); let response: H256 = api.call("author_submitExtrinsic", [xt.clone()]).await.unwrap(); @@ -116,7 +116,7 @@ async fn author_should_watch_extrinsic() { let api = TestSetup::into_rpc(); let xt = to_hex( &ExtrinsicBuilder::new_call_with_priority(0) - .signer(AccountKeyring::Alice.into()) + .signer(Sr25519Keyring::Alice.into()) .build() .encode(), true, @@ -135,7 +135,7 @@ async fn author_should_watch_extrinsic() { // Replace the extrinsic and observe the subscription is notified. 
let (xt_replacement, xt_hash) = { let tx = ExtrinsicBuilder::new_call_with_priority(1) - .signer(AccountKeyring::Alice.into()) + .signer(Sr25519Keyring::Alice.into()) .build() .encode(); let hash = blake2_256(&tx); @@ -172,7 +172,7 @@ async fn author_should_return_watch_validation_error() { async fn author_should_return_pending_extrinsics() { let api = TestSetup::into_rpc(); - let xt_bytes: Bytes = uxt(AccountKeyring::Alice, 0).encode().into(); + let xt_bytes: Bytes = uxt(Sr25519Keyring::Alice, 0).encode().into(); api.call::<_, H256>("author_submitExtrinsic", [to_hex(&xt_bytes, true)]) .await .unwrap(); @@ -190,14 +190,14 @@ async fn author_should_remove_extrinsics() { // Submit three extrinsics, then remove two of them (will cause the third to be removed as well, // having a higher nonce) - let xt1_bytes = uxt(AccountKeyring::Alice, 0).encode(); + let xt1_bytes = uxt(Sr25519Keyring::Alice, 0).encode(); let xt1 = to_hex(&xt1_bytes, true); let xt1_hash: H256 = api.call("author_submitExtrinsic", [xt1]).await.unwrap(); - let xt2 = to_hex(&uxt(AccountKeyring::Alice, 1).encode(), true); + let xt2 = to_hex(&uxt(Sr25519Keyring::Alice, 1).encode(), true); let xt2_hash: H256 = api.call("author_submitExtrinsic", [xt2]).await.unwrap(); - let xt3 = to_hex(&uxt(AccountKeyring::Bob, 0).encode(), true); + let xt3 = to_hex(&uxt(Sr25519Keyring::Bob, 0).encode(), true); let xt3_hash: H256 = api.call("author_submitExtrinsic", [xt3]).await.unwrap(); assert_eq!(setup.pool.status().ready, 3); diff --git a/substrate/client/rpc/src/state/tests.rs b/substrate/client/rpc/src/state/tests.rs index 6b711f2425e9..c02f0d0b759b 100644 --- a/substrate/client/rpc/src/state/tests.rs +++ b/substrate/client/rpc/src/state/tests.rs @@ -228,8 +228,8 @@ async fn should_notify_about_storage_changes() { .unwrap(); builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 42, nonce: 0, }) @@ -255,11 +255,11 @@ async fn should_send_initial_storage_changes_and_notifications() { let alice_balance_key = [ sp_crypto_hashing::twox_128(b"System"), sp_crypto_hashing::twox_128(b"Account"), - sp_crypto_hashing::blake2_128(&AccountKeyring::Alice.public()), + sp_crypto_hashing::blake2_128(&Sr25519Keyring::Alice.public()), ] .concat() .iter() - .chain(AccountKeyring::Alice.public().0.iter()) + .chain(Sr25519Keyring::Alice.public().0.iter()) .cloned() .collect::<Vec<u8>>(); @@ -281,8 +281,8 @@ async fn should_send_initial_storage_changes_and_notifications() { .unwrap(); builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 42, nonce: 0, }) diff --git a/substrate/client/runtime-utilities/Cargo.toml b/substrate/client/runtime-utilities/Cargo.toml new file mode 100644 index 000000000000..5e026a5eff1a --- /dev/null +++ b/substrate/client/runtime-utilities/Cargo.toml @@ -0,0 +1,36 @@ +[package] +description = "Substrate client utilities for FRAME runtime function calls."
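+# These helpers are extracted from the benchmarking CLI (see the rename of `substrate/utils/frame/benchmarking-cli/src/overhead/runtime_utilities.rs` below) so they can be reused outside that crate.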
+name = "sc-runtime-utilities" +version = "0.1.0" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +authors.workspace = true +edition.workspace = true +homepage.workspace = true +repository.workspace = true +documentation = "https://docs.rs/sc-metadata" + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { workspace = true, default-features = true } + +sc-executor = { workspace = true, default-features = true } +sc-executor-common = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +sp-wasm-interface = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } + + +thiserror = { workspace = true } + +[dev-dependencies] +cumulus-primitives-proof-size-hostfunction = { workspace = true, default-features = true } +cumulus-test-runtime = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +subxt = { workspace = true, features = ["native"] } diff --git a/substrate/client/runtime-utilities/src/error.rs b/substrate/client/runtime-utilities/src/error.rs new file mode 100644 index 000000000000..a0f1e45a5e57 --- /dev/null +++ b/substrate/client/runtime-utilities/src/error.rs @@ -0,0 +1,35 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . +//! Errors types of runtime utilities. + +/// Generic result for the runtime utilities. +pub type Result = std::result::Result; + +/// Error type for the runtime utilities. +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum Error { + #[error("Scale codec error: {0}")] + ScaleCodec(#[from] codec::Error), + #[error("Opaque metadata not found")] + OpaqueMetadataNotFound, + #[error("Stable metadata version not found")] + StableMetadataVersionNotFound, + #[error("WASM executor error: {0}")] + Executor(#[from] sc_executor_common::error::Error), +} diff --git a/substrate/utils/frame/benchmarking-cli/src/overhead/runtime_utilities.rs b/substrate/client/runtime-utilities/src/lib.rs similarity index 56% rename from substrate/utils/frame/benchmarking-cli/src/overhead/runtime_utilities.rs rename to substrate/client/runtime-utilities/src/lib.rs index c498da38afb0..1ae3e2f1105a 100644 --- a/substrate/utils/frame/benchmarking-cli/src/overhead/runtime_utilities.rs +++ b/substrate/client/runtime-utilities/src/lib.rs @@ -1,21 +1,29 @@ // This file is part of Substrate. // Copyright (C) Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <https://www.gnu.org/licenses/>. + +//! Substrate client runtime utilities. +//! +//! Provides convenient APIs to ease calling functions contained by a FRAME +//! runtime WASM blob. +#![warn(missing_docs)] use codec::{Decode, Encode}; +use error::{Error, Result}; use sc_executor::WasmExecutor; use sp_core::{ traits::{CallContext, CodeExecutor, FetchRuntimeCode, RuntimeCode}, @@ -25,36 +33,35 @@ use sp_state_machine::BasicExternalities; use sp_wasm_interface::HostFunctions; use std::borrow::Cow; +pub mod error; + /// Fetches the latest metadata from the given runtime blob. pub fn fetch_latest_metadata_from_code_blob<HF: HostFunctions>( executor: &WasmExecutor<HF>, code_bytes: Cow<[u8]>, -) -> sc_cli::Result<subxt::Metadata> { +) -> Result<OpaqueMetadata> { let runtime_caller = RuntimeCaller::new(executor, code_bytes); let version_result = runtime_caller.call("Metadata_metadata_versions", ()); - let opaque_metadata: OpaqueMetadata = match version_result { + match version_result { Ok(supported_versions) => { - let latest_version = Vec::<u32>::decode(&mut supported_versions.as_slice()) - .map_err(|e| format!("Unable to decode version list: {e}"))? - .pop() - .ok_or("No metadata versions supported".to_string())?; - - let encoded = runtime_caller - .call("Metadata_metadata_at_version", latest_version) - .map_err(|_| "Unable to fetch metadata from blob".to_string())?; + let supported_versions = Vec::<u32>::decode(&mut supported_versions.as_slice())?; + let latest_stable = supported_versions + .into_iter() + .filter(|v| *v != u32::MAX) + .max() + .ok_or(Error::StableMetadataVersionNotFound)?; + + let encoded = runtime_caller.call("Metadata_metadata_at_version", latest_stable)?; + + Option::<OpaqueMetadata>::decode(&mut encoded.as_slice())? - .ok_or_else(|| "Metadata not found".to_string())? + .ok_or(Error::OpaqueMetadataNotFound) }, Err(_) => { - let encoded = runtime_caller - .call("Metadata_metadata", ()) - .map_err(|_| "Unable to fetch metadata from blob".to_string())?; - Decode::decode(&mut encoded.as_slice())? + let encoded = runtime_caller.call("Metadata_metadata", ())?; + Decode::decode(&mut encoded.as_slice()).map_err(Into::into) }, - }; - - Ok(subxt::Metadata::decode(&mut (*opaque_metadata).as_slice())?)
+ } } struct BasicCodeFetcher<'a> { @@ -69,11 +76,11 @@ impl<'a> FetchRuntimeCode for BasicCodeFetcher<'a> { } impl<'a> BasicCodeFetcher<'a> { - pub fn new(code: Cow<'a, [u8]>) -> Self { + fn new(code: Cow<'a, [u8]>) -> Self { Self { hash: sp_crypto_hashing::blake2_256(&code).to_vec(), code } } - pub fn runtime_code(&'a self) -> RuntimeCode<'a> { + fn runtime_code(&'a self) -> RuntimeCode<'a> { RuntimeCode { code_fetcher: self as &'a dyn FetchRuntimeCode, heap_pages: None, @@ -83,17 +90,20 @@ impl<'a> BasicCodeFetcher<'a> { } /// Simple utility that is used to call into the runtime. -struct RuntimeCaller<'a, 'b, HF: HostFunctions> { +pub struct RuntimeCaller<'a, 'b, HF: HostFunctions> { executor: &'b WasmExecutor<HF>, code_fetcher: BasicCodeFetcher<'a>, } impl<'a, 'b, HF: HostFunctions> RuntimeCaller<'a, 'b, HF> { + /// Instantiate a new runtime caller. pub fn new(executor: &'b WasmExecutor<HF>, code_bytes: Cow<'a, [u8]>) -> Self { Self { executor, code_fetcher: BasicCodeFetcher::new(code_bytes) } } - fn call(&self, method: &str, data: impl Encode) -> sc_executor_common::error::Result<Vec<u8>> { + /// Calls a runtime function represented by a `method` name and `parity-scale-codec` + /// encodable arguments that will be passed to it. + pub fn call(&self, method: &str, data: impl Encode) -> Result<Vec<u8>> { let mut ext = BasicExternalities::default(); self.executor .call( @@ -104,24 +114,33 @@ impl<'a, 'b, HF: HostFunctions> RuntimeCaller<'a, 'b, HF> { CallContext::Offchain, ) .0 + .map_err(Into::into) } } #[cfg(test)] mod tests { - use crate::overhead::command::ParachainHostFunctions; use codec::Decode; use sc_executor::WasmExecutor; use sp_version::RuntimeVersion; + type ParachainHostFunctions = ( + cumulus_primitives_proof_size_hostfunction::storage_proof_size::HostFunctions, + sp_io::SubstrateHostFunctions, + ); + #[test] fn test_fetch_latest_metadata_from_blob_fetches_metadata() { let executor: WasmExecutor<ParachainHostFunctions> = WasmExecutor::builder().build(); let code_bytes = cumulus_test_runtime::WASM_BINARY .expect("To run this test, build the wasm binary of cumulus-test-runtime") .to_vec(); - let metadata = - super::fetch_latest_metadata_from_code_blob(&executor, code_bytes.into()).unwrap(); + let metadata = subxt::Metadata::decode( + &mut (*super::fetch_latest_metadata_from_code_blob(&executor, code_bytes.into()) + .unwrap()) + .as_slice(), + ) + .unwrap(); assert!(metadata.pallet_by_name("ParachainInfo").is_some()); } diff --git a/substrate/client/service/Cargo.toml b/substrate/client/service/Cargo.toml index f2fc65ef2439..3981395d9768 100644 --- a/substrate/client/service/Cargo.toml +++ b/substrate/client/service/Cargo.toml @@ -20,8 +20,6 @@ default = ["rocksdb"] # The RocksDB feature activates the RocksDB database backend. If it is not activated, and you pass # a path to a database, an error will be produced at runtime.
rocksdb = ["sc-client-db/rocksdb"] -# exposes the client type -test-helpers = [] runtime-benchmarks = [ "sc-client-db/runtime-benchmarks", "sp-runtime/runtime-benchmarks", diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs index 68ac94539df8..a47a05c0a190 100644 --- a/substrate/client/service/src/builder.rs +++ b/substrate/client/service/src/builder.rs @@ -25,7 +25,7 @@ use crate::{ start_rpc_servers, BuildGenesisBlock, GenesisBlockBuilder, RpcHandlers, SpawnTaskHandle, TaskManager, TransactionPoolAdapter, }; -use futures::{future::ready, FutureExt, StreamExt}; +use futures::{select, FutureExt, StreamExt}; use jsonrpsee::RpcModule; use log::info; use prometheus_endpoint::Registry; @@ -90,7 +90,11 @@ use sp_consensus::block_validation::{ use sp_core::traits::{CodeExecutor, SpawnNamed}; use sp_keystore::KeystorePtr; use sp_runtime::traits::{Block as BlockT, BlockIdTo, NumberFor, Zero}; -use std::{str::FromStr, sync::Arc, time::SystemTime}; +use std::{ + str::FromStr, + sync::Arc, + time::{Duration, SystemTime}, +}; /// Full client type. pub type TFullClient = @@ -577,22 +581,42 @@ pub async fn propagate_transaction_notifications( Block: BlockT, ExPool: MaintainedTransactionPool::Hash>, { + const TELEMETRY_INTERVAL: Duration = Duration::from_secs(1); + // transaction notifications - transaction_pool - .import_notification_stream() - .for_each(move |hash| { - tx_handler_controller.propagate_transaction(hash); - let status = transaction_pool.status(); - telemetry!( - telemetry; - SUBSTRATE_INFO; - "txpool.import"; - "ready" => status.ready, - "future" => status.future, - ); - ready(()) - }) - .await; + let mut notifications = transaction_pool.import_notification_stream().fuse(); + let mut timer = futures_timer::Delay::new(TELEMETRY_INTERVAL).fuse(); + let mut tx_imported = false; + + loop { + select! { + notification = notifications.next() => { + let Some(hash) = notification else { return }; + + tx_handler_controller.propagate_transaction(hash); + + tx_imported = true; + }, + _ = timer => { + timer = futures_timer::Delay::new(TELEMETRY_INTERVAL).fuse(); + + if !tx_imported { + continue; + } + + tx_imported = false; + let status = transaction_pool.status(); + + telemetry!( + telemetry; + SUBSTRATE_INFO; + "txpool.import"; + "ready" => status.ready, + "future" => status.future, + ); + } + } + } } /// Initialize telemetry with provided configuration and return telemetry handle @@ -731,8 +755,7 @@ where client.clone(), backend.clone(), genesis_hash, - // Defaults to sensible limits for the `Archive`. - sc_rpc_spec_v2::archive::ArchiveConfig::default(), + task_executor.clone(), ) .into_rpc(); rpc_api.merge(archive_v2).map_err(|e| Error::Application(e.into()))?; diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index ce5b92551bf2..eddbb9260c05 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ -85,10 +85,8 @@ use std::{ sync::Arc, }; -#[cfg(feature = "test-helpers")] -use { - super::call_executor::LocalCallExecutor, sc_client_api::in_mem, sp_core::traits::CodeExecutor, -}; +use super::call_executor::LocalCallExecutor; +use sp_core::traits::CodeExecutor; type NotificationSinks = Mutex>>; @@ -152,39 +150,6 @@ enum PrepareStorageChangesResult { Discard(ImportResult), Import(Option>), } - -/// Create an instance of in-memory client. 
-#[cfg(feature = "test-helpers")] -pub fn new_in_mem<E, Block, G, RA>( - backend: Arc<in_mem::Backend<Block>>, - executor: E, - genesis_block_builder: G, - prometheus_registry: Option<Registry>, - telemetry: Option<TelemetryHandle>, - spawn_handle: Box<dyn SpawnNamed>, - config: ClientConfig<Block>, -) -> sp_blockchain::Result< - Client<in_mem::Backend<Block>, LocalCallExecutor<Block, in_mem::Backend<Block>, E>, Block, RA>, -> -where - E: CodeExecutor + sc_executor::RuntimeVersionOf, - Block: BlockT, - G: BuildGenesisBlock< - Block, - BlockImportOperation = <in_mem::Backend<Block> as backend::Backend<Block>>::BlockImportOperation, - >, -{ - new_with_backend( - backend, - executor, - genesis_block_builder, - spawn_handle, - prometheus_registry, - telemetry, - config, - ) -} - /// Client configuration items. #[derive(Debug, Clone)] pub struct ClientConfig<Block: BlockT> { @@ -218,7 +183,6 @@ impl<Block: BlockT> Default for ClientConfig<Block> { /// Create a client with the explicitly provided backend. /// This is useful for testing backend implementations. -#[cfg(feature = "test-helpers")] pub fn new_with_backend<B, E, Block, G, RA>( backend: Arc<B>, executor: E, diff --git a/substrate/client/service/src/client/mod.rs b/substrate/client/service/src/client/mod.rs index ec77a92f162f..3020b3d296f4 100644 --- a/substrate/client/service/src/client/mod.rs +++ b/substrate/client/service/src/client/mod.rs @@ -56,5 +56,4 @@ pub use call_executor::LocalCallExecutor; pub use client::{Client, ClientConfig}; pub(crate) use code_provider::CodeProvider; -#[cfg(feature = "test-helpers")] -pub use self::client::{new_in_mem, new_with_backend}; +pub use self::client::new_with_backend; diff --git a/substrate/client/service/src/lib.rs b/substrate/client/service/src/lib.rs index 9c01d7288a81..2a3144a33e1a 100644 --- a/substrate/client/service/src/lib.rs +++ b/substrate/client/service/src/lib.rs @@ -23,14 +23,11 @@ #![recursion_limit = "1024"] pub mod chain_ops; +pub mod client; pub mod config; pub mod error; mod builder; -#[cfg(feature = "test-helpers")] -pub mod client; -#[cfg(not(feature = "test-helpers"))] -mod client; mod metrics; mod task_manager; @@ -599,8 +596,8 @@ mod tests { let transaction = Transfer { amount: 5, nonce: 0, - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Bob.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Bob.into(), } .into_unchecked_extrinsic(); block_on(pool.submit_one(best.hash(), source, transaction.clone())).unwrap(); diff --git a/substrate/client/service/test/Cargo.toml b/substrate/client/service/test/Cargo.toml index 0edfc5b19314..632b98104f6b 100644 --- a/substrate/client/service/test/Cargo.toml +++ b/substrate/client/service/test/Cargo.toml @@ -31,7 +31,7 @@ sc-consensus = { workspace = true, default-features = true } sc-executor = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } sc-network-sync = { workspace = true, default-features = true } -sc-service = { features = ["test-helpers"], workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } sc-transaction-pool-api = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } diff --git a/substrate/client/service/test/src/client/mod.rs b/substrate/client/service/test/src/client/mod.rs index 55bbfcdd8594..ef5de93d64ca 100644 --- a/substrate/client/service/test/src/client/mod.rs +++ b/substrate/client/service/test/src/client/mod.rs @@ -29,7 +29,7 @@ use sc_consensus::{ BlockCheckParams, BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, }; use sc_executor::WasmExecutor; -use sc_service::client::{new_in_mem,
Client, LocalCallExecutor}; +use sc_service::client::{new_with_backend, Client, LocalCallExecutor}; use sp_api::ProvideRuntimeApi; use sp_consensus::{BlockOrigin, Error as ConsensusError, SelectChain}; use sp_core::{testing::TaskExecutor, traits::CallContext, H256}; @@ -48,8 +48,8 @@ use substrate_test_runtime_client::{ genesismap::{insert_genesis_block, GenesisStorageBuilder}, Block, BlockNumber, Digest, Hash, Header, RuntimeApi, Transfer, }, - AccountKeyring, BlockBuilderExt, ClientBlockImportExt, ClientExt, DefaultTestClientBuilderExt, - Sr25519Keyring, TestClientBuilder, TestClientBuilderExt, + BlockBuilderExt, ClientBlockImportExt, ClientExt, DefaultTestClientBuilderExt, Sr25519Keyring, + TestClientBuilder, TestClientBuilderExt, }; mod db; @@ -126,8 +126,8 @@ fn block1(genesis_hash: Hash, backend: &InMemoryBackend) -> Vec 1, genesis_hash, vec![Transfer { - from: AccountKeyring::One.into(), - to: AccountKeyring::Two.into(), + from: Sr25519Keyring::One.into(), + to: Sr25519Keyring::Two.into(), amount: 69 * DOLLARS, nonce: 0, }], @@ -158,7 +158,7 @@ fn finality_notification_check( fn construct_genesis_should_work_with_native() { let mut storage = GenesisStorageBuilder::new( vec![Sr25519Keyring::One.public().into(), Sr25519Keyring::Two.public().into()], - vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], + vec![Sr25519Keyring::One.into(), Sr25519Keyring::Two.into()], 1000 * DOLLARS, ) .build(); @@ -189,7 +189,7 @@ fn construct_genesis_should_work_with_native() { fn construct_genesis_should_work_with_wasm() { let mut storage = GenesisStorageBuilder::new( vec![Sr25519Keyring::One.public().into(), Sr25519Keyring::Two.public().into()], - vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], + vec![Sr25519Keyring::One.into(), Sr25519Keyring::Two.into()], 1000 * DOLLARS, ) .build(); @@ -223,14 +223,14 @@ fn client_initializes_from_genesis_ok() { assert_eq!( client .runtime_api() - .balance_of(client.chain_info().best_hash, AccountKeyring::Alice.into()) + .balance_of(client.chain_info().best_hash, Sr25519Keyring::Alice.into()) .unwrap(), 1000 * DOLLARS ); assert_eq!( client .runtime_api() - .balance_of(client.chain_info().best_hash, AccountKeyring::Ferdie.into()) + .balance_of(client.chain_info().best_hash, Sr25519Keyring::Ferdie.into()) .unwrap(), 0 * DOLLARS ); @@ -266,8 +266,8 @@ fn block_builder_works_with_transactions() { builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 42 * DOLLARS, nonce: 0, }) @@ -301,14 +301,14 @@ fn block_builder_works_with_transactions() { assert_eq!( client .runtime_api() - .balance_of(client.chain_info().best_hash, AccountKeyring::Alice.into()) + .balance_of(client.chain_info().best_hash, Sr25519Keyring::Alice.into()) .unwrap(), 958 * DOLLARS ); assert_eq!( client .runtime_api() - .balance_of(client.chain_info().best_hash, AccountKeyring::Ferdie.into()) + .balance_of(client.chain_info().best_hash, Sr25519Keyring::Ferdie.into()) .unwrap(), 42 * DOLLARS ); @@ -325,8 +325,8 @@ fn block_builder_does_not_include_invalid() { builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 42 * DOLLARS, nonce: 0, }) @@ -334,8 +334,8 @@ fn block_builder_does_not_include_invalid() { assert!(builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: 
AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 30 * DOLLARS, nonce: 0, }) @@ -491,8 +491,8 @@ fn uncles_with_multiple_forks() { // this push is required as otherwise B2 has the same hash as A2 and won't get imported builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 41 * DOLLARS, nonce: 0, }) @@ -531,8 +531,8 @@ fn uncles_with_multiple_forks() { // this push is required as otherwise C3 has the same hash as B3 and won't get imported builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 1 * DOLLARS, nonce: 1, }) @@ -549,8 +549,8 @@ fn uncles_with_multiple_forks() { // this push is required as otherwise D2 has the same hash as B2 and won't get imported builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 1 * DOLLARS, nonce: 0, }) @@ -691,8 +691,8 @@ fn finality_target_on_longest_chain_with_multiple_forks() { // this push is required as otherwise B2 has the same hash as A2 and won't get imported builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 41 * DOLLARS, nonce: 0, }) @@ -732,8 +732,8 @@ fn finality_target_on_longest_chain_with_multiple_forks() { // this push is required as otherwise C3 has the same hash as B3 and won't get imported builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 1 * DOLLARS, nonce: 1, }) @@ -751,8 +751,8 @@ fn finality_target_on_longest_chain_with_multiple_forks() { // this push is required as otherwise D2 has the same hash as B2 and won't get imported builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 1 * DOLLARS, nonce: 0, }) @@ -982,8 +982,8 @@ fn finality_target_with_best_not_on_longest_chain() { // this push is required as otherwise B2 has the same hash as A2 and won't get imported builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 41 * DOLLARS, nonce: 0, }) @@ -1134,8 +1134,8 @@ fn importing_diverged_finalized_block_should_trigger_reorg() { .unwrap(); // needed to make sure B1 gets a different hash from A1 b1.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 1 * DOLLARS, nonce: 0, }) @@ -1195,8 +1195,8 @@ fn finalizing_diverged_block_should_trigger_reorg() { .unwrap(); // needed to make sure B1 gets a different hash from A1 b1.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 1 * DOLLARS, nonce: 0, }) @@ -1303,8 +1303,8 @@ fn finality_notifications_content() { .unwrap(); // 
needed to make sure B1 gets a different hash from A1 b1.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 1, nonce: 0, }) @@ -1329,8 +1329,8 @@ fn finality_notifications_content() { .unwrap(); // needed to make sure B1 gets a different hash from A1 c1.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 2 * DOLLARS, nonce: 0, }) @@ -1346,8 +1346,8 @@ fn finality_notifications_content() { // needed to make sure D3 gets a different hash from A3 d3.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 2 * DOLLARS, nonce: 0, }) @@ -1415,7 +1415,7 @@ fn state_reverted_on_reorg() { let current_balance = |client: &substrate_test_runtime_client::TestClient| { client .runtime_api() - .balance_of(client.chain_info().best_hash, AccountKeyring::Alice.into()) + .balance_of(client.chain_info().best_hash, Sr25519Keyring::Alice.into()) .unwrap() }; @@ -1428,8 +1428,8 @@ fn state_reverted_on_reorg() { .build() .unwrap(); a1.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Bob.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Bob.into(), amount: 10 * DOLLARS, nonce: 0, }) @@ -1443,8 +1443,8 @@ fn state_reverted_on_reorg() { .build() .unwrap(); b1.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 50 * DOLLARS, nonce: 0, }) @@ -1460,8 +1460,8 @@ fn state_reverted_on_reorg() { .build() .unwrap(); a2.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Charlie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Charlie.into(), amount: 10 * DOLLARS, nonce: 1, }) @@ -1530,8 +1530,8 @@ fn doesnt_import_blocks_that_revert_finality() { // needed to make sure B1 gets a different hash from A1 b1.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 1 * DOLLARS, nonce: 0, }) @@ -1580,8 +1580,8 @@ fn doesnt_import_blocks_that_revert_finality() { // needed to make sure C1 gets a different hash from A1 and B1 c1.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 2 * DOLLARS, nonce: 0, }) @@ -1788,8 +1788,8 @@ fn returns_status_for_pruned_blocks() { // b1 is created, but not imported b1.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 1 * DOLLARS, nonce: 0, }) @@ -2087,13 +2087,13 @@ fn cleans_up_closed_notification_sinks_on_block_import() { // NOTE: we need to build the client here instead of using the client // provided by test_runtime_client otherwise we can't access the private // `import_notification_sinks` and `finality_notification_sinks` fields. 
- let mut client = new_in_mem::<_, Block, _, RuntimeApi>( + let mut client = new_with_backend::<_, _, Block, _, RuntimeApi>( backend, executor, genesis_block_builder, + Box::new(TaskExecutor::new()), None, None, - Box::new(TaskExecutor::new()), client_config, ) .unwrap(); @@ -2191,8 +2191,8 @@ fn reorg_triggers_a_notification_even_for_sources_that_should_not_trigger_notifi // needed to make sure B1 gets a different hash from A1 b1.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 1 * DOLLARS, nonce: 0, }) diff --git a/substrate/client/telemetry/Cargo.toml b/substrate/client/telemetry/Cargo.toml index f87e8b66f731..db325a94ab21 100644 --- a/substrate/client/telemetry/Cargo.toml +++ b/substrate/client/telemetry/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] chrono = { workspace = true } futures = { workspace = true } -libp2p = { features = ["dns", "tcp", "tokio", "wasm-ext", "websocket"], workspace = true } +libp2p = { features = ["dns", "tcp", "tokio", "websocket"], workspace = true } log = { workspace = true, default-features = true } parking_lot = { workspace = true, default-features = true } pin-project = { workspace = true } diff --git a/substrate/client/telemetry/src/node.rs b/substrate/client/telemetry/src/node.rs index 0bbdbfb622ef..2c8d424c4340 100644 --- a/substrate/client/telemetry/src/node.rs +++ b/substrate/client/telemetry/src/node.rs @@ -18,7 +18,13 @@ use crate::TelemetryPayload; use futures::{channel::mpsc, prelude::*}; -use libp2p::{core::transport::Transport, Multiaddr}; +use libp2p::{ + core::{ + transport::{DialOpts, PortUse, Transport}, + Endpoint, + }, + Multiaddr, +}; use rand::Rng as _; use std::{ fmt, mem, @@ -229,7 +235,10 @@ where }, NodeSocket::ReconnectNow => { let addr = self.addr.clone(); - match self.transport.dial(addr) { + match self + .transport + .dial(addr, DialOpts { role: Endpoint::Dialer, port_use: PortUse::New }) + { Ok(d) => { log::trace!(target: "telemetry", "Re-dialing {}", self.addr); socket = NodeSocket::Dialing(d); diff --git a/substrate/client/tracing/src/logging/directives.rs b/substrate/client/tracing/src/logging/directives.rs index a99e9c4c8909..811511bb20f5 100644 --- a/substrate/client/tracing/src/logging/directives.rs +++ b/substrate/client/tracing/src/logging/directives.rs @@ -40,7 +40,7 @@ pub(crate) fn add_default_directives(directives: &str) { add_directives(directives); } -/// Add directives to current directives +/// Add directives to current directives. pub fn add_directives(directives: &str) { CURRENT_DIRECTIVES .get_or_init(|| Mutex::new(Vec::new())) @@ -48,6 +48,11 @@ pub fn add_directives(directives: &str) { .push(directives.to_owned()); } +/// Returns the current directives. +pub fn get_directives() -> Vec { + CURRENT_DIRECTIVES.get_or_init(|| Mutex::new(Vec::new())).lock().clone() +} + /// Parse `Directive` and add to default directives if successful. /// /// Ensures the supplied directive will be restored when resetting the log filter. 
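The `get_directives` accessor added above pairs with the existing `add_directives`: it clones the current directive list out of the global mutex. A minimal usage sketch (the caller function is hypothetical; only the two directive functions come from the diff above, and the assumption is that both are in scope, e.g. inside sc-tracing's `logging` module):

```rust
// Hypothetical caller, assuming `add_directives` and `get_directives` from the
// diff above are in scope.
fn dump_directives() {
	add_directives("txpool=trace");
	add_directives("sync=debug");
	// `get_directives` returns a cloned `Vec<String>` snapshot, so iterating
	// over the result does not keep the underlying mutex locked.
	for directive in get_directives() {
		println!("active directive: {directive}");
	}
}
```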
diff --git a/substrate/client/transaction-pool/benches/basics.rs b/substrate/client/transaction-pool/benches/basics.rs
index 0d8c1cbba9b4..5e40b0fb72d6 100644
--- a/substrate/client/transaction-pool/benches/basics.rs
+++ b/substrate/client/transaction-pool/benches/basics.rs
@@ -152,7 +152,7 @@ fn uxt(transfer: TransferData) -> Extrinsic {
 }
 
 fn bench_configured(pool: Pool, number: u64, api: Arc) {
-	let source = TransactionSource::External;
+	let source = TimedTransactionSource::new_external(false);
 	let mut futures = Vec::new();
 	let mut tags = Vec::new();
 	let at = HashAndNumber {
@@ -171,7 +171,7 @@ fn bench_configured(pool: Pool, number: u64, api: Arc) {
 
 		tags.push(to_tag(nonce, AccountId::from_h256(H256::from_low_u64_be(1))));
 
-		futures.push(pool.submit_one(&at, source, xt));
+		futures.push(pool.submit_one(&at, source.clone(), xt));
 	}
 
 	let res = block_on(futures::future::join_all(futures.into_iter()));
diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs
index ecae21395c91..7679e3b169d2 100644
--- a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs
+++ b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs
@@ -24,7 +24,7 @@ use crate::{
 	common::log_xt::log_xt_trace,
 	fork_aware_txpool::stream_map_util::next_event,
-	graph::{BlockHash, ChainApi, ExtrinsicHash},
+	graph::{self, BlockHash, ExtrinsicHash},
 	LOG_TARGET,
 };
 use futures::stream::StreamExt;
@@ -33,12 +33,44 @@ use sc_transaction_pool_api::TransactionStatus;
 use sc_utils::mpsc;
 use sp_runtime::traits::Block as BlockT;
 use std::{
-	collections::{hash_map::Entry, HashMap, HashSet},
+	collections::{
+		hash_map::{Entry, OccupiedEntry},
+		HashMap, HashSet,
+	},
 	fmt::{self, Debug, Formatter},
 	pin::Pin,
 };
 use tokio_stream::StreamMap;
 
+/// Represents a transaction that was removed from the transaction pool, including the reason for
+/// its removal.
+#[derive(Debug, PartialEq)]
+pub struct DroppedTransaction {
+	/// Hash of the dropped extrinsic.
+	pub tx_hash: Hash,
+	/// Reason for the transaction being dropped.
+	pub reason: DroppedReason,
+}
+
+impl DroppedTransaction {
+	fn new_usurped(tx_hash: Hash, by: Hash) -> Self {
+		Self { reason: DroppedReason::Usurped(by), tx_hash }
+	}
+
+	fn new_enforced_by_limts(tx_hash: Hash) -> Self {
+		Self { reason: DroppedReason::LimitsEnforced, tx_hash }
+	}
+}
+
+/// Provides the reason why a transaction was dropped.
+#[derive(Debug, PartialEq)]
+pub enum DroppedReason {
+	/// Transaction was replaced by another transaction (e.g. because of higher priority).
+	Usurped(Hash),
+	/// Transaction was dropped because of internal pool limits being enforced.
+	LimitsEnforced,
+}
+
 /// Dropped-logic related event from the single view.
 pub type ViewStreamEvent = crate::graph::DroppedByLimitsEvent, BlockHash>;
 
@@ -47,7 +79,8 @@ type ViewStream = Pin> + Se
 /// Stream of extrinsic hashes that were dropped by the views and have no references by existing
 /// views.
-pub(crate) type StreamOfDropped = Pin> + Send>>;
+pub(crate) type StreamOfDropped =
+	Pin>> + Send>>;
 
 /// A type alias for a sender used as the controller of the [`MultiViewDropWatcherContext`].
 /// Used to send control commands from the [`MultiViewDroppedWatcherController`] to
@@ -59,24 +92,24 @@
 type Controller = mpsc::TracingUnboundedSender;
 type CommandReceiver = mpsc::TracingUnboundedReceiver;
 
 /// Commands to control the instance of dropped transactions stream [`StreamOfDropped`].
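The two types introduced above make the removal cause explicit for downstream consumers. A hedged sketch of how a consumer of the dropped stream can branch on them (the handler itself is illustrative and not part of the change; only `DroppedTransaction` and `DroppedReason` come from the diff):

```rust
use std::fmt::Debug;

// Illustrative consumer of the dropped-transactions stream: reacts differently
// to replacement (`Usurped`) and to limit-based eviction (`LimitsEnforced`).
fn on_dropped<Hash: Debug>(dropped: DroppedTransaction<Hash>) {
	match dropped.reason {
		DroppedReason::Usurped(by) =>
			println!("{:?} was replaced by {:?}", dropped.tx_hash, by),
		DroppedReason::LimitsEnforced =>
			println!("{:?} was evicted by pool limits", dropped.tx_hash),
	}
}
```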
-enum Command
+enum Command
 where
-	C: ChainApi,
+	ChainApi: graph::ChainApi,
 {
 	/// Adds a new stream of dropped-related events originating in a view with a specific block
 	/// hash
-	AddView(BlockHash, ViewStream),
+	AddView(BlockHash, ViewStream),
 	/// Removes an existing view's stream associated with a specific block hash.
-	RemoveView(BlockHash),
-	/// Removes internal states for given extrinsic hashes.
+	RemoveView(BlockHash),
+	/// Removes referencing views for given extrinsic hashes.
 	///
 	/// Intended to be called on finalization.
-	RemoveFinalizedTxs(Vec>),
+	RemoveFinalizedTxs(Vec>),
 }
 
-impl Debug for Command
+impl Debug for Command
 where
-	C: ChainApi,
+	ChainApi: graph::ChainApi,
 {
 	fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
 		match self {
@@ -92,30 +125,114 @@ where
 ///
 /// This struct maintains a mapping of active views and their corresponding streams, as well as the
 /// state of each transaction with respect to these views.
-struct MultiViewDropWatcherContext
+struct MultiViewDropWatcherContext
 where
-	C: ChainApi,
+	ChainApi: graph::ChainApi,
 {
 	/// A map that associates the views identified by corresponding block hashes with their streams
 	/// of dropped-related events. This map is used to keep track of active views and their event
 	/// streams.
-	stream_map: StreamMap, ViewStream>,
+	stream_map: StreamMap, ViewStream>,
 	/// A receiver for commands to control the state of the stream, allowing the addition and
 	/// removal of views. This is used to dynamically update which views are being tracked.
-	command_receiver: CommandReceiver>,
-
+	command_receiver: CommandReceiver>,
 	/// For each transaction hash we keep the set of hashes representing the views that see this
-	/// transaction as ready or future.
+	/// transaction as ready or in_block.
+	///
+	/// Even if all views referencing a ready transaction are removed, we still want to keep the
+	/// transaction: there can be a fork which sees the transaction as ready.
 	///
 	/// Once a transaction is dropped, the dropping view is removed from the set.
-	transaction_states: HashMap, HashSet>>,
+	ready_transaction_views: HashMap, HashSet>>,
+	/// For each transaction hash we keep the set of hashes representing the views that see this
+	/// transaction as future.
+	///
+	/// Once all views referencing a future transaction are removed, the transaction can be
+	/// dropped.
+	///
+	/// Once a transaction is dropped, the dropping view is removed from the set.
+	future_transaction_views: HashMap, HashSet>>,
+
+	/// Transactions that need to be notified as dropped.
+	pending_dropped_transactions: Vec>,
 }
 
 impl MultiViewDropWatcherContext
 where
-	C: ChainApi + 'static,
-	<::Block as BlockT>::Hash: Unpin,
+	C: graph::ChainApi + 'static,
+	<::Block as BlockT>::Hash: Unpin,
 {
+	/// Provides the ready or future `HashSet` containing views referencing the given transaction.
+	fn transaction_views(
+		&mut self,
+		tx_hash: ExtrinsicHash,
+	) -> Option, HashSet>>> {
+		if let Entry::Occupied(views_keeping_tx_valid) = self.ready_transaction_views.entry(tx_hash)
+		{
+			return Some(views_keeping_tx_valid)
+		}
+		if let Entry::Occupied(views_keeping_tx_valid) =
+			self.future_transaction_views.entry(tx_hash)
+		{
+			return Some(views_keeping_tx_valid)
+		}
+		None
+	}
+
+	/// Processes the command and updates internal state accordingly.
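`transaction_views` above probes the ready map first and the future map second, handing back an `OccupiedEntry` so the caller can mutate the referenced view set in place. The same pattern in isolation (standalone sketch with toy key and value types):

```rust
use std::collections::{
	hash_map::{Entry, OccupiedEntry},
	HashMap, HashSet,
};

// Return the occupied entry from whichever map currently tracks `key`,
// preferring `ready` over `future`; no set is cloned, the caller mutates
// the entry in place.
fn probe<'a>(
	ready: &'a mut HashMap<u64, HashSet<u64>>,
	future: &'a mut HashMap<u64, HashSet<u64>>,
	key: u64,
) -> Option<OccupiedEntry<'a, u64, HashSet<u64>>> {
	if let Entry::Occupied(e) = ready.entry(key) {
		return Some(e)
	}
	if let Entry::Occupied(e) = future.entry(key) {
		return Some(e)
	}
	None
}
```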
+	fn handle_command(&mut self, cmd: Command) {
+		match cmd {
+			Command::AddView(key, stream) => {
+				trace!(
+					target: LOG_TARGET,
+					"dropped_watcher: Command::AddView {key:?} views:{:?}",
+					self.stream_map.keys().collect::>()
+				);
+				self.stream_map.insert(key, stream);
+			},
+			Command::RemoveView(key) => {
+				trace!(
+					target: LOG_TARGET,
+					"dropped_watcher: Command::RemoveView {key:?} views:{:?}",
+					self.stream_map.keys().collect::>()
+				);
+				self.stream_map.remove(&key);
+				self.ready_transaction_views.iter_mut().for_each(|(tx_hash, views)| {
+					trace!(
+						target: LOG_TARGET,
+						"[{:?}] dropped_watcher: Command::RemoveView ready views: {:?}",
+						tx_hash,
+						views
+					);
+					views.remove(&key);
+				});
+
+				self.future_transaction_views.iter_mut().for_each(|(tx_hash, views)| {
+					trace!(
+						target: LOG_TARGET,
+						"[{:?}] dropped_watcher: Command::RemoveView future views: {:?}",
+						tx_hash,
+						views
+					);
+					views.remove(&key);
+					if views.is_empty() {
+						self.pending_dropped_transactions.push(*tx_hash);
+					}
+				});
+			},
+			Command::RemoveFinalizedTxs(xts) => {
+				log_xt_trace!(
+					target: LOG_TARGET,
+					xts.clone(),
+					"[{:?}] dropped_watcher: finalized xt removed"
+				);
+				xts.iter().for_each(|xt| {
+					self.ready_transaction_views.remove(xt);
+					self.future_transaction_views.remove(xt);
+				});
+			},
+		}
+	}
+
 	/// Processes a `ViewStreamEvent` from a specific view and updates the internal state
 	/// accordingly.
 	///
@@ -125,41 +242,69 @@ where
 		&mut self,
 		block_hash: BlockHash,
 		event: ViewStreamEvent,
-	) -> Option> {
+	) -> Option>> {
 		trace!(
 			target: LOG_TARGET,
-			"dropped_watcher: handle_event: event:{:?} views:{:?}, ",
-			event,
+			"dropped_watcher: handle_event: event:{event:?} from:{block_hash:?} future_views:{:?} ready_views:{:?} stream_map views:{:?}, ",
+			self.future_transaction_views.get(&event.0),
+			self.ready_transaction_views.get(&event.0),
 			self.stream_map.keys().collect::>(),
 		);
 		let (tx_hash, status) = event;
 		match status {
-			TransactionStatus::Ready | TransactionStatus::Future => {
-				self.transaction_states.entry(tx_hash).or_default().insert(block_hash);
+			TransactionStatus::Future => {
+				self.future_transaction_views.entry(tx_hash).or_default().insert(block_hash);
+			},
+			TransactionStatus::Ready | TransactionStatus::InBlock(..) => {
+				// note: if a future transaction was once seen as ready, we may want to treat it
+				// as a ready transaction. Unreferenced future transactions are more likely to be
+				// removed when the last referencing view is removed than ready transactions. A
+				// transaction seen as ready is likely quite close to being included in some
+				// future fork.
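The branch that follows promotes such a transaction from the future map into the ready map, carrying its whole view set along. The same move in isolation (toy hash types; a sketch, not the pool code):

```rust
use std::collections::{HashMap, HashSet};

// Promote `tx` from `future` to `ready`, keeping every view that already
// referenced it and adding the view that just reported it as ready.
fn promote(
	ready: &mut HashMap<u64, HashSet<u64>>,
	future: &mut HashMap<u64, HashSet<u64>>,
	tx: u64,
	reporting_view: u64,
) {
	if let Some(mut views) = future.remove(&tx) {
		views.insert(reporting_view);
		ready.insert(tx, views);
	} else {
		ready.entry(tx).or_default().insert(reporting_view);
	}
}
```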
+ if let Some(mut views) = self.future_transaction_views.remove(&tx_hash) { + views.insert(block_hash); + self.ready_transaction_views.insert(tx_hash, views); + } else { + self.ready_transaction_views.entry(tx_hash).or_default().insert(block_hash); + } }, - TransactionStatus::Dropped | TransactionStatus::Usurped(_) => { - if let Entry::Occupied(mut views_keeping_tx_valid) = - self.transaction_states.entry(tx_hash) - { + TransactionStatus::Dropped => { + if let Some(mut views_keeping_tx_valid) = self.transaction_views(tx_hash) { views_keeping_tx_valid.get_mut().remove(&block_hash); - if views_keeping_tx_valid.get().is_empty() || - views_keeping_tx_valid - .get() - .iter() - .all(|h| !self.stream_map.contains_key(h)) - { - return Some(tx_hash) + if views_keeping_tx_valid.get().is_empty() { + return Some(DroppedTransaction::new_enforced_by_limts(tx_hash)) } } else { debug!("[{:?}] dropped_watcher: removing (non-tracked) tx", tx_hash); - return Some(tx_hash) + return Some(DroppedTransaction::new_enforced_by_limts(tx_hash)) } }, + TransactionStatus::Usurped(by) => + return Some(DroppedTransaction::new_usurped(tx_hash, by)), _ => {}, }; None } + /// Gets pending dropped transactions if any. + fn get_pending_dropped_transaction(&mut self) -> Option>> { + while let Some(tx_hash) = self.pending_dropped_transactions.pop() { + // never drop transaction that was seen as ready. It may not have a referencing + // view now, but such fork can appear. + if self.ready_transaction_views.get(&tx_hash).is_some() { + continue + } + + if let Some(views) = self.future_transaction_views.get(&tx_hash) { + if views.is_empty() { + self.future_transaction_views.remove(&tx_hash); + return Some(DroppedTransaction::new_enforced_by_limts(tx_hash)) + } + } + } + None + } + /// Creates a new `StreamOfDropped` and its associated event stream controller. /// /// This method initializes the internal structures and unfolds the stream of dropped @@ -176,42 +321,29 @@ where let ctx = Self { stream_map: StreamMap::new(), command_receiver, - transaction_states: Default::default(), + ready_transaction_views: Default::default(), + future_transaction_views: Default::default(), + pending_dropped_transactions: Default::default(), }; let stream_map = futures::stream::unfold(ctx, |mut ctx| async move { loop { + if let Some(dropped) = ctx.get_pending_dropped_transaction() { + debug!("dropped_watcher: sending out (pending): {dropped:?}"); + return Some((dropped, ctx)); + } tokio::select! { biased; - cmd = ctx.command_receiver.next() => { - match cmd? 
{ - Command::AddView(key,stream) => { - trace!(target: LOG_TARGET,"dropped_watcher: Command::AddView {key:?} views:{:?}",ctx.stream_map.keys().collect::>()); - ctx.stream_map.insert(key,stream); - }, - Command::RemoveView(key) => { - trace!(target: LOG_TARGET,"dropped_watcher: Command::RemoveView {key:?} views:{:?}",ctx.stream_map.keys().collect::>()); - ctx.stream_map.remove(&key); - ctx.transaction_states.iter_mut().for_each(|(_,state)| { - state.remove(&key); - }); - }, - Command::RemoveFinalizedTxs(xts) => { - log_xt_trace!(target: LOG_TARGET, xts.clone(), "[{:?}] dropped_watcher: finalized xt removed"); - xts.iter().for_each(|xt| { - ctx.transaction_states.remove(xt); - }); - - }, - } - }, - Some(event) = next_event(&mut ctx.stream_map) => { if let Some(dropped) = ctx.handle_event(event.0, event.1) { debug!("dropped_watcher: sending out: {dropped:?}"); return Some((dropped, ctx)); } + }, + cmd = ctx.command_receiver.next() => { + ctx.handle_command(cmd?); } + } } }) @@ -225,30 +357,30 @@ where /// /// This struct provides methods to add and remove streams associated with views to and from the /// stream. -pub struct MultiViewDroppedWatcherController { +pub struct MultiViewDroppedWatcherController { /// A controller allowing to update the state of the associated [`StreamOfDropped`]. - controller: Controller>, + controller: Controller>, } -impl Clone for MultiViewDroppedWatcherController { +impl Clone for MultiViewDroppedWatcherController { fn clone(&self) -> Self { Self { controller: self.controller.clone() } } } -impl MultiViewDroppedWatcherController +impl MultiViewDroppedWatcherController where - C: ChainApi + 'static, - <::Block as BlockT>::Hash: Unpin, + ChainApi: graph::ChainApi + 'static, + <::Block as BlockT>::Hash: Unpin, { /// Creates new [`StreamOfDropped`] and its controller. - pub fn new() -> (MultiViewDroppedWatcherController, StreamOfDropped) { - let (stream_map, ctrl) = MultiViewDropWatcherContext::::event_stream(); + pub fn new() -> (MultiViewDroppedWatcherController, StreamOfDropped) { + let (stream_map, ctrl) = MultiViewDropWatcherContext::::event_stream(); (Self { controller: ctrl }, stream_map.boxed()) } /// Notifies the [`StreamOfDropped`] that new view was created. - pub fn add_view(&self, key: BlockHash, view: ViewStream) { + pub fn add_view(&self, key: BlockHash, view: ViewStream) { let _ = self.controller.unbounded_send(Command::AddView(key, view)).map_err(|e| { trace!(target: LOG_TARGET, "dropped_watcher: add_view {key:?} send message failed: {e}"); }); @@ -256,14 +388,17 @@ where /// Notifies the [`StreamOfDropped`] that the view was destroyed and shall be removed the /// stream map. - pub fn remove_view(&self, key: BlockHash) { + pub fn remove_view(&self, key: BlockHash) { let _ = self.controller.unbounded_send(Command::RemoveView(key)).map_err(|e| { trace!(target: LOG_TARGET, "dropped_watcher: remove_view {key:?} send message failed: {e}"); }); } /// Removes status info for finalized transactions. 
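The reworked `event_stream` loop above first flushes `pending_dropped_transactions`, then polls with a `biased` select that prefers view events over control commands (previously commands were polled first). A stripped-down model of that loop shape (toy item types; assumes the tokio, tokio-stream and futures crates; not the pool code):

```rust
use futures::StreamExt;
use tokio_stream::StreamMap;

struct Ctx {
	pending: Vec<u64>,
	views: StreamMap<u64, futures::stream::BoxStream<'static, u64>>,
	commands: futures::channel::mpsc::UnboundedReceiver<u64>,
}

// One turn of the unfolded stream: drain pending items first, then poll the
// merged view streams ahead of the command channel (`biased` fixes the order).
async fn next_item(mut ctx: Ctx) -> Option<(u64, Ctx)> {
	loop {
		if let Some(item) = ctx.pending.pop() {
			return Some((item, ctx))
		}
		tokio::select! {
			biased;
			Some((_view, event)) = ctx.views.next() => return Some((event, ctx)),
			cmd = ctx.commands.next() => {
				// A command mutates `ctx`; `None` means all senders are gone.
				let _cmd = cmd?;
			},
		}
	}
}
```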
- pub fn remove_finalized_txs(&self, xts: impl IntoIterator> + Clone) { + pub fn remove_finalized_txs( + &self, + xts: impl IntoIterator> + Clone, + ) { let _ = self .controller .unbounded_send(Command::RemoveFinalizedTxs(xts.into_iter().collect())) @@ -298,7 +433,7 @@ mod dropped_watcher_tests { watcher.add_view(block_hash, view_stream); let handle = tokio::spawn(async move { output_stream.take(1).collect::>().await }); - assert_eq!(handle.await.unwrap(), vec![tx_hash]); + assert_eq!(handle.await.unwrap(), vec![DroppedTransaction::new_enforced_by_limts(tx_hash)]); } #[tokio::test] @@ -348,7 +483,10 @@ mod dropped_watcher_tests { watcher.add_view(block_hash0, view_stream0); watcher.add_view(block_hash1, view_stream1); let handle = tokio::spawn(async move { output_stream.take(1).collect::>().await }); - assert_eq!(handle.await.unwrap(), vec![tx_hash1]); + assert_eq!( + handle.await.unwrap(), + vec![DroppedTransaction::new_enforced_by_limts(tx_hash1)] + ); } #[tokio::test] @@ -373,10 +511,11 @@ mod dropped_watcher_tests { watcher.add_view(block_hash0, view_stream0); assert!(output_stream.next().now_or_never().is_none()); + watcher.remove_view(block_hash0); watcher.add_view(block_hash1, view_stream1); let handle = tokio::spawn(async move { output_stream.take(1).collect::>().await }); - assert_eq!(handle.await.unwrap(), vec![tx_hash]); + assert_eq!(handle.await.unwrap(), vec![DroppedTransaction::new_enforced_by_limts(tx_hash)]); } #[tokio::test] @@ -419,6 +558,6 @@ mod dropped_watcher_tests { let block_hash2 = H256::repeat_byte(0x03); watcher.add_view(block_hash2, view_stream2); let handle = tokio::spawn(async move { output_stream.take(1).collect::>().await }); - assert_eq!(handle.await.unwrap(), vec![tx_hash]); + assert_eq!(handle.await.unwrap(), vec![DroppedTransaction::new_enforced_by_limts(tx_hash)]); } } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index 065d0cb3a274..4ec87f1fefa4 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -23,7 +23,7 @@ use super::{ import_notification_sink::MultiViewImportNotificationSink, metrics::MetricsLink as PrometheusMetrics, multi_view_listener::MultiViewListener, - tx_mem_pool::{TxInMemPool, TxMemPool, TXMEMPOOL_TRANSACTION_LIMIT_MULTIPLIER}, + tx_mem_pool::{InsertionInfo, TxInMemPool, TxMemPool, TXMEMPOOL_TRANSACTION_LIMIT_MULTIPLIER}, view::View, view_store::ViewStore, }; @@ -31,8 +31,12 @@ use crate::{ api::FullChainApi, common::log_xt::log_xt_trace, enactment_state::{EnactmentAction, EnactmentState}, - fork_aware_txpool::revalidation_worker, - graph::{self, base_pool::Transaction, ExtrinsicFor, ExtrinsicHash, IsValidator, Options}, + fork_aware_txpool::{dropped_watcher::DroppedReason, revalidation_worker}, + graph::{ + self, + base_pool::{TimedTransactionSource, Transaction}, + ExtrinsicFor, ExtrinsicHash, IsValidator, Options, + }, ReadyIteratorFor, LOG_TARGET, }; use async_trait::async_trait; @@ -197,9 +201,14 @@ where let (dropped_stream_controller, dropped_stream) = MultiViewDroppedWatcherController::::new(); + + let view_store = + Arc::new(ViewStore::new(pool_api.clone(), listener, dropped_stream_controller)); + let dropped_monitor_task = Self::dropped_monitor_task( dropped_stream, mempool.clone(), + view_store.clone(), import_notification_sink.clone(), ); @@ -216,8 +225,8 @@ where ( Self { mempool, - 
api: pool_api.clone(), - view_store: Arc::new(ViewStore::new(pool_api, listener, dropped_stream_controller)), + api: pool_api, + view_store, ready_poll: Arc::from(Mutex::from(ReadyPoll::new())), enactment_state: Arc::new(Mutex::new(EnactmentState::new( best_block_hash, @@ -233,14 +242,17 @@ where ) } - /// Monitors the stream of dropped transactions and removes them from the mempool. + /// Monitors the stream of dropped transactions and removes them from the mempool and + /// view_store. /// /// This asynchronous task continuously listens for dropped transaction notifications provided /// within `dropped_stream` and ensures that these transactions are removed from the `mempool` - /// and `import_notification_sink` instances. + /// and `import_notification_sink` instances. For Usurped events, the transaction is also + /// removed from the view_store. async fn dropped_monitor_task( mut dropped_stream: StreamOfDropped, mempool: Arc>, + view_store: Arc>, import_notification_sink: MultiViewImportNotificationSink< Block::Hash, ExtrinsicHash, @@ -251,9 +263,33 @@ where log::debug!(target: LOG_TARGET, "fatp::dropped_monitor_task: terminated..."); break; }; - log::trace!(target: LOG_TARGET, "[{:?}] fatp::dropped notification, removing", dropped); - mempool.remove_dropped_transactions(&[dropped]).await; - import_notification_sink.clean_notified_items(&[dropped]); + let dropped_tx_hash = dropped.tx_hash; + log::trace!(target: LOG_TARGET, "[{:?}] fatp::dropped notification {:?}, removing", dropped_tx_hash,dropped.reason); + match dropped.reason { + DroppedReason::Usurped(new_tx_hash) => { + if let Some(new_tx) = mempool.get_by_hash(new_tx_hash) { + view_store + .replace_transaction( + new_tx.source(), + new_tx.tx(), + dropped_tx_hash, + new_tx.is_watched(), + ) + .await; + } else { + log::trace!( + target:LOG_TARGET, + "error: dropped_monitor_task: no entry in mempool for new transaction {:?}", + new_tx_hash, + ); + } + }, + DroppedReason::LimitsEnforced => {}, + }; + + mempool.remove_dropped_transaction(&dropped_tx_hash).await; + view_store.listener.transaction_dropped(dropped); + import_notification_sink.clean_notified_items(&[dropped_tx_hash]); } } @@ -288,9 +324,13 @@ where let (dropped_stream_controller, dropped_stream) = MultiViewDroppedWatcherController::::new(); + + let view_store = + Arc::new(ViewStore::new(pool_api.clone(), listener, dropped_stream_controller)); let dropped_monitor_task = Self::dropped_monitor_task( dropped_stream, mempool.clone(), + view_store.clone(), import_notification_sink.clone(), ); @@ -306,8 +346,8 @@ where Self { mempool, - api: pool_api.clone(), - view_store: Arc::new(ViewStore::new(pool_api, listener, dropped_stream_controller)), + api: pool_api, + view_store, ready_poll: Arc::from(Mutex::from(ReadyPoll::new())), enactment_state: Arc::new(Mutex::new(EnactmentState::new( best_block_hash, @@ -366,6 +406,16 @@ where self.mempool.unwatched_and_watched_count() } + /// Returns a set of future transactions for given block hash. + /// + /// Intended for logging / tests. + pub fn futures_at( + &self, + at: Block::Hash, + ) -> Option, ExtrinsicFor>>> { + self.view_store.futures_at(at) + } + /// Returns a best-effort set of ready transactions for a given block, without executing full /// maintain process. 
/// @@ -600,31 +650,33 @@ where let mempool_results = self.mempool.extend_unwatched(source, &xts); if view_store.is_empty() { - return Ok(mempool_results) + return Ok(mempool_results.into_iter().map(|r| r.map(|r| r.hash)).collect::>()) } let to_be_submitted = mempool_results .iter() .zip(xts) - .filter_map(|(result, xt)| result.as_ref().ok().map(|_| xt)) + .filter_map(|(result, xt)| { + result.as_ref().ok().map(|insertion| (insertion.source.clone(), xt)) + }) .collect::>(); self.metrics .report(|metrics| metrics.submitted_transactions.inc_by(to_be_submitted.len() as _)); let mempool = self.mempool.clone(); - let results_map = view_store.submit(source, to_be_submitted.into_iter()).await; + let results_map = view_store.submit(to_be_submitted.into_iter()).await; let mut submission_results = reduce_multiview_result(results_map).into_iter(); Ok(mempool_results .into_iter() .map(|result| { - result.and_then(|xt_hash| { + result.and_then(|insertion| { submission_results .next() .expect("The number of Ok results in mempool is exactly the same as the size of to-views-submission result. qed.") .inspect_err(|_| - mempool.remove(xt_hash) + mempool.remove(insertion.hash) ) }) }) @@ -660,19 +712,18 @@ where ) -> Result>>, Self::Error> { log::trace!(target: LOG_TARGET, "[{:?}] fatp::submit_and_watch views:{}", self.tx_hash(&xt), self.active_views_count()); let xt = Arc::from(xt); - let xt_hash = match self.mempool.push_watched(source, xt.clone()) { - Ok(xt_hash) => xt_hash, - Err(e) => return Err(e), - }; + let InsertionInfo { hash: xt_hash, source: timed_source } = + match self.mempool.push_watched(source, xt.clone()) { + Ok(result) => result, + Err(e) => return Err(e), + }; self.metrics.report(|metrics| metrics.submitted_transactions.inc()); - let view_store = self.view_store.clone(); - let mempool = self.mempool.clone(); - view_store - .submit_and_watch(at, source, xt) + self.view_store + .submit_and_watch(at, timed_source, xt) .await - .inspect_err(|_| mempool.remove(xt_hash)) + .inspect_err(|_| self.mempool.remove(xt_hash)) } /// Intended to remove transactions identified by the given hashes, and any dependent @@ -801,12 +852,12 @@ where ) -> Result { log::debug!(target: LOG_TARGET, "fatp::submit_local views:{}", self.active_views_count()); let xt = Arc::from(xt); - let result = self + let InsertionInfo { hash: xt_hash, .. } = self .mempool .extend_unwatched(TransactionSource::Local, &[xt.clone()]) .remove(0)?; - self.view_store.submit_local(xt).or_else(|_| Ok(result)) + self.view_store.submit_local(xt).or_else(|_| Ok(xt_hash)) } } @@ -914,6 +965,9 @@ where let start = Instant::now(); let watched_xts = self.register_listeners(&mut view).await; let duration = start.elapsed(); + // sync the transactions statuses and referencing views in all the listeners with newly + // cloned view. + view.pool.validated_pool().retrigger_notifications(); log::debug!(target: LOG_TARGET, "register_listeners: at {at:?} took {duration:?}"); // 2. Handle transactions from the tree route. 
Pruning transactions from the view first @@ -1041,58 +1095,35 @@ where self.active_views_count() ); let included_xts = self.extrinsics_included_since_finalized(view.at.hash).await; - let xts = self.mempool.clone_unwatched(); - - let mut all_submitted_count = 0; - if !xts.is_empty() { - let unwatched_count = xts.len(); - let mut buckets = HashMap::>>::default(); - xts.into_iter() - .filter(|(hash, _)| !view.pool.validated_pool().pool.read().is_imported(hash)) - .filter(|(hash, _)| !included_xts.contains(&hash)) - .map(|(_, tx)| (tx.source(), tx.tx())) - .for_each(|(source, tx)| buckets.entry(source).or_default().push(tx)); - - for (source, xts) in buckets { - all_submitted_count += xts.len(); - let _ = view.submit_many(source, xts).await; - } - log::debug!(target: LOG_TARGET, "update_view_with_mempool: at {:?} unwatched {}/{}", view.at.hash, all_submitted_count, unwatched_count); - } - - let watched_submitted_count = watched_xts.len(); - let mut buckets = HashMap::< - TransactionSource, - Vec<(ExtrinsicHash, ExtrinsicFor)>, - >::default(); - watched_xts + let (hashes, xts_filtered): (Vec<_>, Vec<_>) = watched_xts .into_iter() + .chain(self.mempool.clone_unwatched().into_iter()) + .filter(|(hash, _)| !view.is_imported(hash)) .filter(|(hash, _)| !included_xts.contains(&hash)) - .map(|(tx_hash, tx)| (tx.source(), tx_hash, tx.tx())) - .for_each(|(source, tx_hash, tx)| { - buckets.entry(source).or_default().push((tx_hash, tx)) - }); + .map(|(tx_hash, tx)| (tx_hash, (tx.source(), tx.tx()))) + .unzip(); - let mut watched_results = Vec::default(); - for (source, watched_xts) in buckets { - let hashes = watched_xts.iter().map(|i| i.0).collect::>(); - let results = view - .submit_many(source, watched_xts.into_iter().map(|i| i.1)) - .await - .into_iter() - .zip(hashes) - .map(|(result, tx_hash)| result.or_else(|_| Err(tx_hash))) - .collect::>(); - watched_results.extend(results); - } + let watched_results = view + .submit_many(xts_filtered) + .await + .into_iter() + .zip(hashes) + .map(|(result, tx_hash)| result.or_else(|_| Err(tx_hash))) + .collect::>(); + + let submitted_count = watched_results.len(); - log::debug!(target: LOG_TARGET, "update_view_with_mempool: at {:?} watched {}/{}", view.at.hash, watched_submitted_count, self.mempool_len().1); + log::debug!( + target: LOG_TARGET, + "update_view_with_mempool: at {:?} submitted {}/{}", + view.at.hash, + submitted_count, + self.mempool.len() + ); - all_submitted_count += watched_submitted_count; - let _ = all_submitted_count - .try_into() - .map(|v| self.metrics.report(|metrics| metrics.submitted_from_mempool_txs.inc_by(v))); + self.metrics + .report(|metrics| metrics.submitted_from_mempool_txs.inc_by(submitted_count as _)); // if there are no views yet, and a single newly created view is reporting error, just send // out the invalid event, and remove transaction. @@ -1176,7 +1207,14 @@ where }) .map(|(tx_hash, tx)| { //find arc if tx is known - self.mempool.get_by_hash(tx_hash).unwrap_or_else(|| Arc::from(tx)) + self.mempool + .get_by_hash(tx_hash) + .map(|tx| (tx.source(), tx.tx())) + .unwrap_or_else(|| { + // These transactions are coming from retracted blocks, we + // should simply consider them external. + (TimedTransactionSource::new_external(true), Arc::from(tx)) + }) }), ); @@ -1185,16 +1223,7 @@ where }); } - let _ = view - .pool - .resubmit_at( - &hash_and_number, - // These transactions are coming from retracted blocks, we should - // simply consider them external. 
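The `update_view_with_mempool` rework above folds watched and unwatched transactions into a single batch: hashes and `(source, tx)` pairs are split with `unzip`, submitted together, and the results are re-zipped with their hashes. That bookkeeping pattern in isolation (toy payload types; a sketch only):

```rust
// Split (hash, payload) pairs, run one batched submission, then pair each
// result back with its hash, mirroring the `unzip`/`zip` flow above.
fn submit_batch(batch: Vec<(u64, &str)>) -> Vec<Result<u64, u64>> {
	let (hashes, payloads): (Vec<_>, Vec<_>) = batch.into_iter().unzip();
	payloads
		.iter()
		.map(|p| !p.is_empty()) // stand-in for `view.submit_many(..)` results
		.zip(hashes)
		.map(|(accepted, hash)| if accepted { Ok(hash) } else { Err(hash) })
		.collect()
}
```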
- TransactionSource::External, - resubmit_transactions, - ) - .await; + let _ = view.pool.resubmit_at(&hash_and_number, resubmit_transactions).await; } } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/import_notification_sink.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/import_notification_sink.rs index 7fbdcade63b8..f9a41673bb8f 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/import_notification_sink.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/import_notification_sink.rs @@ -326,6 +326,7 @@ mod tests { let j0 = tokio::spawn(runnable); let stream = ctrl.event_stream(); + let stream2 = ctrl.event_stream(); let mut v1 = View::new(vec![(10, 1), (10, 2), (10, 3)]); let mut v2 = View::new(vec![(20, 1), (20, 2), (20, 6)]); @@ -342,20 +343,16 @@ mod tests { ctrl.add_view(1000, o1); ctrl.add_view(2000, o2); - let j4 = { - let ctrl = ctrl.clone(); - tokio::spawn(async move { - tokio::time::sleep(Duration::from_millis(70)).await; - ctrl.clean_notified_items(&vec![1, 3]); - ctrl.add_view(3000, o3.boxed()); - }) - }; + let out = stream.take(4).collect::>().await; + assert_eq!(out, vec![1, 2, 3, 6]); - let out = stream.take(6).collect::>().await; + ctrl.clean_notified_items(&vec![1, 3]); + ctrl.add_view(3000, o3.boxed()); + let out = stream2.take(6).collect::>().await; assert_eq!(out, vec![1, 2, 3, 6, 1, 3]); - drop(ctrl); - futures::future::join_all(vec![j0, j1, j2, j3, j4]).await; + drop(ctrl); + futures::future::join_all(vec![j0, j1, j2, j3]).await; } #[tokio::test] diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs index 8d0e69db2e9a..a00234a99808 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs @@ -36,6 +36,8 @@ use std::{ }; use tokio_stream::StreamMap; +use super::dropped_watcher::{DroppedReason, DroppedTransaction}; + /// A side channel allowing to control the external stream instance (one per transaction) with /// [`ControllerCommand`]. /// @@ -79,7 +81,7 @@ enum ControllerCommand { /// Notifies that a transaction was dropped from the pool. /// /// If all preconditions are met, an external dropped event will be sent out. - TransactionDropped, + TransactionDropped(DroppedReason>), } impl std::fmt::Debug for ControllerCommand @@ -99,8 +101,8 @@ where ControllerCommand::TransactionBroadcasted(_) => { write!(f, "ListenerAction::TransactionBroadcasted(...)") }, - ControllerCommand::TransactionDropped => { - write!(f, "ListenerAction::TransactionDropped") + ControllerCommand::TransactionDropped(r) => { + write!(f, "ListenerAction::TransactionDropped {r:?}") }, } } @@ -268,6 +270,7 @@ where /// stream map. fn remove_view(&mut self, block_hash: BlockHash) { self.status_stream_map.remove(&block_hash); + self.views_keeping_tx_valid.remove(&block_hash); trace!(target: LOG_TARGET, "[{:?}] RemoveView view: {:?} views:{:?}", self.tx_hash, block_hash, self.status_stream_map.keys().collect::>()); } } @@ -282,6 +285,11 @@ where Self { controllers: Default::default() } } + /// Returns `true` if the listener contains a stream controller for the specified hash. + pub fn contains_tx(&self, tx_hash: &ExtrinsicHash) -> bool { + self.controllers.read().contains_key(tx_hash) + } + /// Creates an external aggregated stream of events for given transaction. 
/// /// This method initializes an `ExternalWatcherContext` for the provided transaction hash, sets @@ -346,11 +354,16 @@ where log::trace!(target: LOG_TARGET, "[{:?}] mvl sending out: Broadcasted", ctx.tx_hash); return Some((TransactionStatus::Broadcast(peers), ctx)) }, - ControllerCommand::TransactionDropped => { + ControllerCommand::TransactionDropped(DroppedReason::LimitsEnforced) => { log::trace!(target: LOG_TARGET, "[{:?}] mvl sending out: Dropped", ctx.tx_hash); ctx.terminate = true; return Some((TransactionStatus::Dropped, ctx)) }, + ControllerCommand::TransactionDropped(DroppedReason::Usurped(by)) => { + log::trace!(target: LOG_TARGET, "[{:?}] mvl sending out: Usurped({:?})", ctx.tx_hash, by); + ctx.terminate = true; + return Some((TransactionStatus::Usurped(by), ctx)) + }, } }, }; @@ -445,16 +458,15 @@ where /// /// This method sends a `TransactionDropped` command to the controller of each requested /// transaction prompting and external `Broadcasted` event. - pub(crate) fn transactions_dropped(&self, dropped: &[ExtrinsicHash]) { + pub(crate) fn transaction_dropped(&self, dropped: DroppedTransaction>) { let mut controllers = self.controllers.write(); - debug!(target: LOG_TARGET, "mvl::transactions_dropped: {:?}", dropped); - for tx_hash in dropped { - if let Some(tx) = controllers.remove(&tx_hash) { - debug!(target: LOG_TARGET, "[{:?}] transaction_dropped", tx_hash); - if let Err(e) = tx.unbounded_send(ControllerCommand::TransactionDropped) { - trace!(target: LOG_TARGET, "[{:?}] transactions_dropped: send message failed: {:?}", tx_hash, e); - }; - } + debug!(target: LOG_TARGET, "mvl::transaction_dropped: {:?}", dropped); + if let Some(tx) = controllers.remove(&dropped.tx_hash) { + let DroppedTransaction { tx_hash, reason } = dropped; + debug!(target: LOG_TARGET, "[{:?}] transaction_dropped", tx_hash); + if let Err(e) = tx.unbounded_send(ControllerCommand::TransactionDropped(reason)) { + trace!(target: LOG_TARGET, "[{:?}] transaction_dropped: send message failed: {:?}", tx_hash, e); + }; } } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/revalidation_worker.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/revalidation_worker.rs index 9464ab3f5766..e1c65a08a70b 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/revalidation_worker.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/revalidation_worker.rs @@ -186,11 +186,11 @@ mod tests { use crate::{ common::tests::{uxt, TestApi}, fork_aware_txpool::view::FinishRevalidationLocalChannels, + TimedTransactionSource, }; use futures::executor::block_on; - use sc_transaction_pool_api::TransactionSource; use substrate_test_runtime::{AccountId, Transfer, H256}; - use substrate_test_runtime_client::AccountKeyring::Alice; + use substrate_test_runtime_client::Sr25519Keyring::Alice; #[test] fn revalidation_queue_works() { let api = Arc::new(TestApi::default()); @@ -212,9 +212,10 @@ mod tests { nonce: 0, }); - let _ = block_on( - view.submit_many(TransactionSource::External, std::iter::once(uxt.clone().into())), - ); + let _ = block_on(view.submit_many(std::iter::once(( + TimedTransactionSource::new_external(false), + uxt.clone().into(), + )))); assert_eq!(api.validation_requests().len(), 1); let (finish_revalidation_request_tx, finish_revalidation_request_rx) = diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs index 86c07008c3f3..989ae4425dc4 100644 --- 
a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs @@ -30,7 +30,7 @@ use super::{metrics::MetricsLink as PrometheusMetrics, multi_view_listener::Mult use crate::{ common::log_xt::log_xt_trace, graph, - graph::{tracked_map::Size, ExtrinsicFor, ExtrinsicHash}, + graph::{base_pool::TimedTransactionSource, tracked_map::Size, ExtrinsicFor, ExtrinsicHash}, LOG_TARGET, }; use futures::FutureExt; @@ -74,7 +74,7 @@ where /// Size of the extrinsics actual body. bytes: usize, /// Transaction source. - source: TransactionSource, + source: TimedTransactionSource, /// When the transaction was revalidated, used to periodically revalidate the mem pool buffer. validated_at: AtomicU64, //todo: we need to add future / ready status at finalized block. @@ -95,18 +95,30 @@ where /// Shall the progress of transaction be watched. /// /// Was transaction sent with `submit_and_watch`. - fn is_watched(&self) -> bool { + pub(crate) fn is_watched(&self) -> bool { self.watched } /// Creates a new instance of wrapper for unwatched transaction. fn new_unwatched(source: TransactionSource, tx: ExtrinsicFor, bytes: usize) -> Self { - Self { watched: false, tx, source, validated_at: AtomicU64::new(0), bytes } + Self { + watched: false, + tx, + source: TimedTransactionSource::from_transaction_source(source, true), + validated_at: AtomicU64::new(0), + bytes, + } } /// Creates a new instance of wrapper for watched transaction. fn new_watched(source: TransactionSource, tx: ExtrinsicFor, bytes: usize) -> Self { - Self { watched: true, tx, source, validated_at: AtomicU64::new(0), bytes } + Self { + watched: true, + tx, + source: TimedTransactionSource::from_transaction_source(source, true), + validated_at: AtomicU64::new(0), + bytes, + } } /// Provides a clone of actual transaction body. @@ -117,8 +129,8 @@ where } /// Returns the source of the transaction. - pub(crate) fn source(&self) -> TransactionSource { - self.source + pub(crate) fn source(&self) -> TimedTransactionSource { + self.source.clone() } } @@ -174,6 +186,19 @@ where max_transactions_total_bytes: usize, } +/// Helper structure to encapsulate a result of [`TxMemPool::try_insert`]. +#[derive(Debug)] +pub(super) struct InsertionInfo { + pub(super) hash: Hash, + pub(super) source: TimedTransactionSource, +} + +impl InsertionInfo { + fn new(hash: Hash, source: TimedTransactionSource) -> Self { + Self { hash, source } + } +} + impl TxMemPool where Block: BlockT, @@ -220,8 +245,8 @@ where pub(super) fn get_by_hash( &self, hash: ExtrinsicHash, - ) -> Option> { - self.transactions.read().get(&hash).map(|t| t.tx()) + ) -> Option>> { + self.transactions.read().get(&hash).map(Clone::clone) } /// Returns a tuple with the count of unwatched and watched transactions in the memory pool. @@ -231,6 +256,11 @@ where (transactions.len() - watched_count, watched_count) } + /// Returns a total number of transactions kept within mempool. + pub fn len(&self) -> usize { + self.transactions.read().len() + } + /// Returns the number of bytes used by all extrinsics in the the pool. 
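The `tx_mem_pool` changes that follow thread a `TimedTransactionSource` through every stored transaction and make `try_insert` return an `InsertionInfo` (hash plus the source recorded at insertion time). A toy model of `try_insert`'s admission table, as shown in the hunk below (stand-in types; a sketch, not the pool implementation):

```rust
use std::collections::HashMap;

struct InsertionInfo {
	hash: u64,
	source: String,
}

enum PoolError {
	AlreadyImported,
	ImmediatelyDropped,
}

// Admit a transaction only when the pool has room and the hash is new; report
// which precondition failed otherwise, like the (limit, known) match below.
fn try_insert(
	pool: &mut HashMap<u64, String>,
	capacity: usize,
	hash: u64,
	source: String,
) -> Result<InsertionInfo, PoolError> {
	match (pool.len() < capacity, pool.contains_key(&hash)) {
		(true, false) => {
			pool.insert(hash, source.clone());
			Ok(InsertionInfo { hash, source })
		},
		(_, true) => Err(PoolError::AlreadyImported),
		(false, _) => Err(PoolError::ImmediatelyDropped),
	}
}
```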
#[cfg(test)] pub fn bytes(&self) -> usize { @@ -249,7 +279,7 @@ where &self, hash: ExtrinsicHash, tx: TxInMemPool, - ) -> Result, ChainApi::Error> { + ) -> Result>, ChainApi::Error> { let bytes = self.transactions.bytes(); let mut transactions = self.transactions.write(); let result = match ( @@ -257,14 +287,15 @@ where transactions.contains_key(&hash), ) { (true, false) => { + let source = tx.source(); transactions.insert(hash, Arc::from(tx)); - Ok(hash) + Ok(InsertionInfo::new(hash, source)) }, (_, true) => Err(sc_transaction_pool_api::error::Error::AlreadyImported(Box::new(hash)).into()), (false, _) => Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped.into()), }; - log::trace!(target: LOG_TARGET, "[{:?}] mempool::try_insert: {:?}", hash, result); + log::trace!(target: LOG_TARGET, "[{:?}] mempool::try_insert: {:?}", hash, result.as_ref().map(|r| r.hash)); result } @@ -277,7 +308,7 @@ where &self, source: TransactionSource, xts: &[ExtrinsicFor], - ) -> Vec, ChainApi::Error>> { + ) -> Vec>, ChainApi::Error>> { let result = xts .iter() .map(|xt| { @@ -294,25 +325,18 @@ where &self, source: TransactionSource, xt: ExtrinsicFor, - ) -> Result, ChainApi::Error> { + ) -> Result>, ChainApi::Error> { let (hash, length) = self.api.hash_and_length(&xt); self.try_insert(hash, TxInMemPool::new_watched(source, xt.clone(), length)) } - /// Removes transactions from the memory pool which are specified by the given list of hashes - /// and send the `Dropped` event to the listeners of these transactions. - pub(super) async fn remove_dropped_transactions( + /// Removes transaction from the memory pool which are specified by the given list of hashes. + pub(super) async fn remove_dropped_transaction( &self, - to_be_removed: &[ExtrinsicHash], - ) { - log::debug!(target: LOG_TARGET, "remove_dropped_transactions count:{:?}", to_be_removed.len()); - log_xt_trace!(target: LOG_TARGET, to_be_removed, "[{:?}] mempool::remove_dropped_transactions"); - let mut transactions = self.transactions.write(); - to_be_removed.iter().for_each(|t| { - transactions.remove(t); - }); - - self.listener.transactions_dropped(to_be_removed); + dropped: &ExtrinsicHash, + ) -> Option>> { + log::debug!(target: LOG_TARGET, "[{:?}] mempool::remove_dropped_transaction", dropped); + self.transactions.write().remove(dropped) } /// Clones and returns a `HashMap` of references to all unwatched transactions in the memory @@ -369,13 +393,13 @@ where }; let validations_futures = input.into_iter().map(|(xt_hash, xt)| { - self.api.validate_transaction(finalized_block.hash, xt.source, xt.tx()).map( - move |validation_result| { + self.api + .validate_transaction(finalized_block.hash, xt.source.clone().into(), xt.tx()) + .map(move |validation_result| { xt.validated_at .store(finalized_block.number.into().as_u64(), atomic::Ordering::Relaxed); (xt_hash, validation_result) - }, - ) + }) }); let validation_results = futures::future::join_all(validations_futures).await; let input_len = validation_results.len(); @@ -403,7 +427,7 @@ where log::debug!( target: LOG_TARGET, - "mempool::revalidate: at {finalized_block:?} count:{input_len}/{count} purged:{} took {duration:?}", invalid_hashes.len(), + "mempool::revalidate: at {finalized_block:?} count:{input_len}/{count} invalid_hashes:{} took {duration:?}", invalid_hashes.len(), ); invalid_hashes @@ -445,7 +469,7 @@ mod tx_mem_pool_tests { use super::*; use crate::{common::tests::TestApi, graph::ChainApi}; use substrate_test_runtime::{AccountId, Extrinsic, ExtrinsicBuilder, Transfer, H256}; - use 
substrate_test_runtime_client::AccountKeyring::*; + use substrate_test_runtime_client::Sr25519Keyring::*; fn uxt(nonce: u64) -> Extrinsic { crate::common::tests::uxt(Transfer { from: Alice.into(), diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs index 99095d88cb0a..3cbb8fa4871d 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs @@ -27,13 +27,13 @@ use super::metrics::MetricsLink as PrometheusMetrics; use crate::{ common::log_xt::log_xt_trace, graph::{ - self, watcher::Watcher, ExtrinsicFor, ExtrinsicHash, IsValidator, ValidatedTransaction, - ValidatedTransactionFor, + self, base_pool::TimedTransactionSource, watcher::Watcher, ExtrinsicFor, ExtrinsicHash, + IsValidator, ValidatedTransaction, ValidatedTransactionFor, }, LOG_TARGET, }; use parking_lot::Mutex; -use sc_transaction_pool_api::{error::Error as TxPoolError, PoolStatus, TransactionSource}; +use sc_transaction_pool_api::{error::Error as TxPoolError, PoolStatus}; use sp_blockchain::HashAndNumber; use sp_runtime::{ generic::BlockId, traits::Block as BlockT, transaction_validity::TransactionValidityError, @@ -157,22 +157,21 @@ where /// Imports many unvalidated extrinsics into the view. pub(super) async fn submit_many( &self, - source: TransactionSource, - xts: impl IntoIterator>, + xts: impl IntoIterator)>, ) -> Vec, ChainApi::Error>> { if log::log_enabled!(target: LOG_TARGET, log::Level::Trace) { let xts = xts.into_iter().collect::>(); - log_xt_trace!(target: LOG_TARGET, xts.iter().map(|xt| self.pool.validated_pool().api().hash_and_length(xt).0), "[{:?}] view::submit_many at:{}", self.at.hash); - self.pool.submit_at(&self.at, source, xts).await + log_xt_trace!(target: LOG_TARGET, xts.iter().map(|(_,xt)| self.pool.validated_pool().api().hash_and_length(xt).0), "[{:?}] view::submit_many at:{}", self.at.hash); + self.pool.submit_at(&self.at, xts).await } else { - self.pool.submit_at(&self.at, source, xts).await + self.pool.submit_at(&self.at, xts).await } } /// Import a single extrinsic and starts to watch its progress in the view. pub(super) async fn submit_and_watch( &self, - source: TransactionSource, + source: TimedTransactionSource, xt: ExtrinsicFor, ) -> Result, ExtrinsicHash>, ChainApi::Error> { log::trace!(target: LOG_TARGET, "[{:?}] view::submit_and_watch at:{}", self.pool.validated_pool().api().hash_and_length(&xt).0, self.at.hash); @@ -193,7 +192,7 @@ where .api() .validate_transaction_blocking( self.at.hash, - TransactionSource::Local, + sc_transaction_pool_api::TransactionSource::Local, Arc::from(xt.clone()), )? 
.map_err(|e| { @@ -214,7 +213,7 @@ where let validated = ValidatedTransaction::valid_at( block_number.saturated_into::(), hash, - TransactionSource::Local, + TimedTransactionSource::new_local(true), Arc::from(xt), length, validity, @@ -285,7 +284,7 @@ where } _ = async { if let Some(tx) = batch_iter.next() { - let validation_result = (api.validate_transaction(self.at.hash, tx.source, tx.data.clone()).await, tx.hash, tx); + let validation_result = (api.validate_transaction(self.at.hash, tx.source.clone().into(), tx.data.clone()).await, tx.hash, tx); validation_results.push(validation_result); } else { self.revalidation_worker_channels.lock().as_mut().map(|ch| ch.remove_sender()); @@ -324,7 +323,7 @@ where ValidatedTransaction::valid_at( self.at.number.saturated_into::(), tx_hash, - tx.source, + tx.source.clone(), tx.data.clone(), api.hash_and_length(&tx.data).1, validity, @@ -455,4 +454,10 @@ where ); } } + + /// Returns true if the transaction with given hash is already imported into the view. + pub(super) fn is_imported(&self, tx_hash: &ExtrinsicHash) -> bool { + const IGNORE_BANNED: bool = false; + self.pool.validated_pool().check_is_known(tx_hash, IGNORE_BANNED).is_err() + } } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs index f23dcedd5bfd..a06c051f0a7e 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs @@ -24,17 +24,51 @@ use super::{ }; use crate::{ fork_aware_txpool::dropped_watcher::MultiViewDroppedWatcherController, - graph, - graph::{base_pool::Transaction, ExtrinsicFor, ExtrinsicHash, TransactionFor}, + graph::{ + self, + base_pool::{TimedTransactionSource, Transaction}, + ExtrinsicFor, ExtrinsicHash, TransactionFor, + }, ReadyIteratorFor, LOG_TARGET, }; use futures::prelude::*; use itertools::Itertools; use parking_lot::RwLock; -use sc_transaction_pool_api::{error::Error as PoolError, PoolStatus, TransactionSource}; +use sc_transaction_pool_api::{error::Error as PoolError, PoolStatus}; use sp_blockchain::TreeRoute; use sp_runtime::{generic::BlockId, traits::Block as BlockT}; -use std::{collections::HashMap, sync::Arc, time::Instant}; +use std::{ + collections::{hash_map::Entry, HashMap}, + sync::Arc, + time::Instant, +}; + +/// Helper struct to keep the context for transaction replacements. +#[derive(Clone)] +struct PendingTxReplacement +where + ChainApi: graph::ChainApi, +{ + /// Indicates if the new transaction was already submitted to all the views in the view_store. + /// If true, it can be removed after inserting any new view. + processed: bool, + /// New transaction replacing the old one. + xt: ExtrinsicFor, + /// Source of the transaction. + source: TimedTransactionSource, + /// Inidicates if transaction is watched. + watched: bool, +} + +impl PendingTxReplacement +where + ChainApi: graph::ChainApi, +{ + /// Creates new unprocessed instance of pending transaction replacement. + fn new(xt: ExtrinsicFor, source: TimedTransactionSource, watched: bool) -> Self { + Self { processed: false, xt, source, watched } + } +} /// The helper structure encapsulates all the views. pub(super) struct ViewStore @@ -62,6 +96,13 @@ where pub(super) most_recent_view: RwLock>, /// The controller of multi view dropped stream. 
pub(super) dropped_stream_controller: MultiViewDroppedWatcherController, + /// The map used to synchronize replacement of transactions between maintain and dropped + /// notification threads. It is meant to ensure that a replaced transaction is also removed from + /// newly built views in the maintain process. + /// + /// The map's key is the hash of the replaced extrinsic. + pending_txs_replacements: + RwLock, PendingTxReplacement>>, } impl ViewStore @@ -83,14 +124,14 @@ where listener, most_recent_view: RwLock::from(None), dropped_stream_controller, + pending_txs_replacements: Default::default(), } } /// Imports a bunch of unverified extrinsics to every active view. pub(super) async fn submit( &self, - source: TransactionSource, - xts: impl IntoIterator> + Clone, + xts: impl IntoIterator)> + Clone, ) -> HashMap, ChainApi::Error>>> { let submit_futures = { let active_views = self.active_views.read(); @@ -99,7 +140,7 @@ where .map(|(_, view)| { let view = view.clone(); let xts = xts.clone(); - async move { (view.at.hash, view.submit_many(source, xts).await) } + async move { (view.at.hash, view.submit_many(xts).await) } }) .collect::>() }; @@ -145,7 +186,7 @@ where pub(super) async fn submit_and_watch( &self, _at: Block::Hash, - source: TransactionSource, + source: TimedTransactionSource, xt: ExtrinsicFor, ) -> Result, ChainApi::Error> { let tx_hash = self.api.hash_and_length(&xt).0; @@ -159,6 +200,7 @@ where .map(|(_, view)| { let view = view.clone(); let xt = xt.clone(); + let source = source.clone(); async move { match view.submit_and_watch(source, xt).await { Ok(watcher) => { @@ -261,12 +303,20 @@ where ) -> Vec, ExtrinsicFor>> { self.most_recent_view .read() - .map(|at| self.get_view_at(at, true)) + .map(|at| self.futures_at(at)) .flatten() - .map(|(v, _)| v.pool.validated_pool().pool.read().futures().cloned().collect()) .unwrap_or_default() } + /// Returns a list of future transactions in the view at the given block hash. + pub(super) fn futures_at( + &self, + at: Block::Hash, + ) -> Option, ExtrinsicFor>>> { + self.get_view_at(at, true) + .map(|(v, _)| v.pool.validated_pool().pool.read().futures().cloned().collect()) + } + /// Collects all the transactions included in the blocks on the provided `tree_route` and /// triggers finalization event for them. /// @@ -329,12 +379,16 @@ where /// - moved to the inactive views set (`inactive_views`), /// - removed from the multi view listeners. /// - /// The `most_recent_view` is update with the reference to the newly inserted view. + /// The `most_recent_view` is updated with the reference to the newly inserted view. + /// + /// If there are any pending tx replacements, they are applied to the new view. pub(super) async fn insert_new_view( &self, view: Arc>, tree_route: &TreeRoute, ) { + self.apply_pending_tx_replacements(view.clone()).await; + //note: most_recent_view must be synced with changes in in/active_views.
{ let mut most_recent_view_lock = self.most_recent_view.write(); @@ -386,8 +440,10 @@ where let mut removed_views = vec![]; { - self.active_views - .read() + let active_views = self.active_views.read(); + let inactive_views = self.inactive_views.read(); + + active_views .iter() .filter(|(hash, v)| !match finalized_number { Err(_) | Ok(None) => **hash == finalized_hash, @@ -396,11 +452,8 @@ where }) .map(|(_, v)| removed_views.push(v.at.hash)) .for_each(drop); - } - { - self.inactive_views - .read() + inactive_views .iter() .filter(|(_, v)| !match finalized_number { Err(_) | Ok(None) => false, @@ -438,30 +491,48 @@ where let finalized_xts = self.finalize_route(finalized_hash, tree_route).await; let finalized_number = self.api.block_id_to_number(&BlockId::Hash(finalized_hash)); + let mut dropped_views = vec![]; //clean up older then finalized { let mut active_views = self.active_views.write(); - active_views.retain(|hash, v| match finalized_number { - Err(_) | Ok(None) => *hash == finalized_hash, - Ok(Some(n)) if v.at.number == n => *hash == finalized_hash, - Ok(Some(n)) => v.at.number > n, + let mut inactive_views = self.inactive_views.write(); + active_views.retain(|hash, v| { + let retain = match finalized_number { + Err(_) | Ok(None) => *hash == finalized_hash, + Ok(Some(n)) if v.at.number == n => *hash == finalized_hash, + Ok(Some(n)) => v.at.number > n, + }; + if !retain { + dropped_views.push(*hash); + } + retain }); - } - { - let mut inactive_views = self.inactive_views.write(); - inactive_views.retain(|_, v| match finalized_number { - Err(_) | Ok(None) => false, - Ok(Some(n)) => v.at.number >= n, + inactive_views.retain(|hash, v| { + let retain = match finalized_number { + Err(_) | Ok(None) => false, + Ok(Some(n)) => v.at.number >= n, + }; + if !retain { + dropped_views.push(*hash); + } + retain }); log::trace!(target:LOG_TARGET,"handle_finalized: inactive_views: {:?}", inactive_views.keys()); } - self.listener.remove_view(finalized_hash); + log::trace!(target:LOG_TARGET,"handle_finalized: dropped_views: {:?}", dropped_views); + self.listener.remove_stale_controllers(); self.dropped_stream_controller.remove_finalized_txs(finalized_xts.clone()); + self.listener.remove_view(finalized_hash); + for view in dropped_views { + self.listener.remove_view(view); + self.dropped_stream_controller.remove_view(view); + } + finalized_xts } @@ -484,4 +555,139 @@ where futures::future::join_all(finish_revalidation_futures).await; log::trace!(target:LOG_TARGET,"finish_background_revalidations took {:?}", start.elapsed()); } + + /// Replaces an existing transaction in the view_store with a new one. + /// + /// Attempts to replace a transaction identified by `replaced` with a new transaction `xt`. + /// + /// Before submitting a transaction to the views, the new *unprocessed* transaction replacement + /// record will be inserted into a pending replacement map. Once the submission to all the views + /// is accomplished, the record is marked as *processed*. + /// + /// This map is later applied in the `insert_new_view` method, executed from a different thread. + /// + /// If the transaction is already being replaced, it will simply return without making + /// changes.
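+ ///
+ /// # Example
+ ///
+ /// An illustrative call sequence only (a sketch for this doc; `new_xt` and `old_hash`
+ /// are hypothetical values, not code from this change):
+ /// ```ignore
+ /// // Replace `old_hash` with `new_xt` in every view that has imported it:
+ /// view_store
+ ///     .replace_transaction(TimedTransactionSource::new_external(true), new_xt, old_hash, true)
+ ///     .await;
+ /// // A view built concurrently by the maintain thread picks the pending record up
+ /// // via `apply_pending_tx_replacements` inside `insert_new_view`.
+ /// ```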
+ pub(super) async fn replace_transaction( + &self, + source: TimedTransactionSource, + xt: ExtrinsicFor, + replaced: ExtrinsicHash, + watched: bool, + ) { + if let Entry::Vacant(entry) = self.pending_txs_replacements.write().entry(replaced) { + entry.insert(PendingTxReplacement::new(xt.clone(), source.clone(), watched)); + } else { + return + }; + + let xt_hash = self.api.hash_and_length(&xt).0; + log::trace!(target:LOG_TARGET,"[{replaced:?}] replace_transaction with {xt_hash:?}, w:{watched}"); + + self.replace_transaction_in_views(source, xt, xt_hash, replaced, watched).await; + + if let Some(replacement) = self.pending_txs_replacements.write().get_mut(&replaced) { + replacement.processed = true; + } + } + + /// Applies pending transaction replacements to the specified view. + /// + /// After application, all already processed replacements are removed. + async fn apply_pending_tx_replacements(&self, view: Arc>) { + let mut futures = vec![]; + for replacement in self.pending_txs_replacements.read().values() { + let xt_hash = self.api.hash_and_length(&replacement.xt).0; + futures.push(self.replace_transaction_in_view( + view.clone(), + replacement.source.clone(), + replacement.xt.clone(), + xt_hash, + replacement.watched, + )); + } + let _results = futures::future::join_all(futures).await; + self.pending_txs_replacements.write().retain(|_, r| r.processed); + } + + /// Submits `xt` to the given view. + /// + /// For a watched transaction, a stream is added to the listener. + async fn replace_transaction_in_view( + &self, + view: Arc>, + source: TimedTransactionSource, + xt: ExtrinsicFor, + xt_hash: ExtrinsicHash, + watched: bool, + ) { + if watched { + match view.submit_and_watch(source, xt).await { + Ok(watcher) => { + self.listener.add_view_watcher_for_tx( + xt_hash, + view.at.hash, + watcher.into_stream().boxed(), + ); + }, + Err(e) => { + log::trace!( + target:LOG_TARGET, + "[{:?}] replace_transaction: submit_and_watch to {} failed {}", + xt_hash, view.at.hash, e + ); + }, + } + } else { + if let Some(Err(e)) = view.submit_many(std::iter::once((source, xt))).await.pop() { + log::trace!( + target:LOG_TARGET, + "[{:?}] replace_transaction: submit to {} failed {}", + xt_hash, view.at.hash, e + ); + } + } + } + + /// Sends `xt` to every view (both active and inactive) containing the `replaced` extrinsic. + /// + /// It is assumed that the transaction is already known by the pool. Intended to be called when + /// `xt` is replacing the `replaced` extrinsic.
+ async fn replace_transaction_in_views( + &self, + source: TimedTransactionSource, + xt: ExtrinsicFor, + xt_hash: ExtrinsicHash, + replaced: ExtrinsicHash, + watched: bool, + ) { + if watched && !self.listener.contains_tx(&xt_hash) { + log::trace!( + target:LOG_TARGET, + "error: replace_transaction_in_views: no listener for watched transaction {:?}", + xt_hash, + ); + return; + } + + let submit_futures = { + let active_views = self.active_views.read(); + let inactive_views = self.inactive_views.read(); + active_views + .iter() + .chain(inactive_views.iter()) + .filter(|(_, view)| view.is_imported(&replaced)) + .map(|(_, view)| { + self.replace_transaction_in_view( + view.clone(), + source.clone(), + xt.clone(), + xt_hash, + watched, + ) + }) + .collect::>() + }; + let _results = futures::future::join_all(submit_futures).await; + } } diff --git a/substrate/client/transaction-pool/src/graph/base_pool.rs b/substrate/client/transaction-pool/src/graph/base_pool.rs index e4c3a6c425a9..04eaa998f42e 100644 --- a/substrate/client/transaction-pool/src/graph/base_pool.rs +++ b/substrate/client/transaction-pool/src/graph/base_pool.rs @@ -20,7 +20,7 @@ //! //! For a more full-featured pool, have a look at the `pool` module. -use std::{cmp::Ordering, collections::HashSet, fmt, hash, sync::Arc}; +use std::{cmp::Ordering, collections::HashSet, fmt, hash, sync::Arc, time::Instant}; use crate::LOG_TARGET; use log::{trace, warn}; @@ -30,8 +30,8 @@ use sp_core::hexdisplay::HexDisplay; use sp_runtime::{ traits::Member, transaction_validity::{ - TransactionLongevity as Longevity, TransactionPriority as Priority, - TransactionSource as Source, TransactionTag as Tag, + TransactionLongevity as Longevity, TransactionPriority as Priority, TransactionSource, + TransactionTag as Tag, }, }; @@ -83,6 +83,44 @@ pub struct PruneStatus { pub pruned: Vec>>, } +/// A transaction source that includes a timestamp indicating when the transaction was submitted. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct TimedTransactionSource { + /// The original source of the transaction. + pub source: TransactionSource, + + /// The time at which the transaction was submitted. + pub timestamp: Option, +} + +impl From for TransactionSource { + fn from(value: TimedTransactionSource) -> Self { + value.source + } +} + +impl TimedTransactionSource { + /// Creates a new instance with an internal `TransactionSource::InBlock` source and an optional + /// timestamp. + pub fn new_in_block(with_timestamp: bool) -> Self { + Self { source: TransactionSource::InBlock, timestamp: with_timestamp.then(Instant::now) } + } + /// Creates a new instance with an internal `TransactionSource::External` source and an optional + /// timestamp. + pub fn new_external(with_timestamp: bool) -> Self { + Self { source: TransactionSource::External, timestamp: with_timestamp.then(Instant::now) } + } + /// Creates a new instance with an internal `TransactionSource::Local` source and an optional + /// timestamp. + pub fn new_local(with_timestamp: bool) -> Self { + Self { source: TransactionSource::Local, timestamp: with_timestamp.then(Instant::now) } + } + /// Creates a new instance with a given source and an optional timestamp.
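+ ///
+ /// # Example
+ ///
+ /// A minimal usage sketch (illustrative only, not part of this change):
+ /// ```ignore
+ /// use sp_runtime::transaction_validity::TransactionSource;
+ /// // Wrap an external source and stamp it with the current time:
+ /// let timed = TimedTransactionSource::from_transaction_source(TransactionSource::External, true);
+ /// assert!(timed.timestamp.is_some());
+ /// // The plain source can be recovered through the `From` impl:
+ /// assert_eq!(TransactionSource::from(timed), TransactionSource::External);
+ /// ```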
+ pub fn from_transaction_source(source: TransactionSource, with_timestamp: bool) -> Self { + Self { source, timestamp: with_timestamp.then(Instant::now) } + } +} + /// Immutable transaction #[derive(PartialEq, Eq, Clone)] pub struct Transaction { @@ -102,8 +140,8 @@ pub struct Transaction { pub provides: Vec, /// Should that transaction be propagated. pub propagate: bool, - /// Source of that transaction. - pub source: Source, + /// Timed source of that transaction. + pub source: TimedTransactionSource, } impl AsRef for Transaction { @@ -157,7 +195,7 @@ impl Transaction { bytes: self.bytes, hash: self.hash.clone(), priority: self.priority, - source: self.source, + source: self.source.clone(), valid_till: self.valid_till, requires: self.requires.clone(), provides: self.provides.clone(), @@ -322,22 +360,36 @@ impl BasePool { if !first { - promoted.push(current_hash); + promoted.push(current_hash.clone()); } + // If there were conflicting future transactions promoted, remove them from + // the promoted set. + promoted.retain(|hash| replaced.iter().all(|tx| *hash != tx.hash)); // The transactions were removed from the ready pool. We might attempt to // re-import them. removed.append(&mut replaced); }, + Err(e @ error::Error::TooLowPriority { .. }) => + if first { + trace!(target: LOG_TARGET, "[{:?}] Error importing {first}: {:?}", current_tx.hash, e); + return Err(e) + } else { + trace!(target: LOG_TARGET, "[{:?}] Error importing {first}: {:?}", current_tx.hash, e); + removed.push(current_tx); + promoted.retain(|hash| *hash != current_hash); + }, // transaction failed to be imported. Err(e) => if first { - trace!(target: LOG_TARGET, "[{:?}] Error importing: {:?}", current_hash, e); + trace!(target: LOG_TARGET, "[{:?}] Error importing {first}: {:?}", current_tx.hash, e); return Err(e) } else { - failed.push(current_hash); + trace!(target: LOG_TARGET, "[{:?}] Error importing {first}: {:?}", current_tx.hash, e); + failed.push(current_tx.hash.clone()); }, } first = false; @@ -434,8 +486,24 @@ impl BasePool Some(current.clone()), - Some(ref tx) if tx.imported_at > current.imported_at => Some(current.clone()), - other => other, + Some(worst) => Some( + match (worst.transaction.source.timestamp, current.transaction.source.timestamp) + { + (Some(worst_timestamp), Some(current_timestamp)) => { + if worst_timestamp > current_timestamp { + current.clone() + } else { + worst + } + }, + _ => + if worst.imported_at > current.imported_at { + current.clone() + } else { + worst + }, + }, + ), }); if let Some(worst) = worst { @@ -562,7 +630,7 @@ mod tests { requires: vec![], provides: vec![], propagate: true, - source: Source::External, + source: TimedTransactionSource::new_external(false), } } @@ -760,6 +828,58 @@ mod tests { ); } + #[test] + fn should_remove_conflicting_future() { + let mut pool = pool(); + pool.import(Transaction { + data: vec![3u8].into(), + hash: 3, + requires: vec![vec![1]], + priority: 50u64, + provides: vec![vec![3]], + ..default_tx().clone() + }) + .unwrap(); + assert_eq!(pool.ready().count(), 0); + assert_eq!(pool.ready.len(), 0); + + let tx2 = Transaction { + data: vec![2u8].into(), + hash: 2, + requires: vec![vec![1]], + provides: vec![vec![3]], + ..default_tx().clone() + }; + pool.import(tx2.clone()).unwrap(); + assert_eq!(pool.future.len(), 2); + + let res = pool + .import(Transaction { + data: vec![1u8].into(), + hash: 1, + provides: vec![vec![1]], + ..default_tx().clone() + }) + .unwrap(); + + assert_eq!( + res, + Imported::Ready { + hash: 1, + promoted: vec![3], + failed: vec![],
+ removed: vec![tx2.into()] + } + ); + + let mut it = pool.ready().into_iter().map(|tx| tx.data[0]); + assert_eq!(it.next(), Some(1)); + assert_eq!(it.next(), Some(3)); + assert_eq!(it.next(), None); + + assert_eq!(pool.future.len(), 0); + } + #[test] fn should_handle_a_cycle() { // given @@ -783,14 +903,14 @@ mod tests { assert_eq!(pool.ready.len(), 0); // when - pool.import(Transaction { + let tx2 = Transaction { data: vec![2u8].into(), hash: 2, requires: vec![vec![2]], provides: vec![vec![0]], ..default_tx().clone() - }) - .unwrap(); + }; + pool.import(tx2.clone()).unwrap(); // then { @@ -817,7 +937,12 @@ mod tests { assert_eq!(it.next(), None); assert_eq!( res, - Imported::Ready { hash: 4, promoted: vec![1, 3], failed: vec![2], removed: vec![] } + Imported::Ready { + hash: 4, + promoted: vec![1, 3], + failed: vec![], + removed: vec![tx2.into()] + } ); assert_eq!(pool.future.len(), 0); } @@ -1024,7 +1149,7 @@ mod tests { ), "Transaction { \ hash: 4, priority: 1000, valid_till: 64, bytes: 1, propagate: true, \ -source: TransactionSource::External, requires: [03, 02], provides: [04], data: [4]}" +source: TimedTransactionSource { source: TransactionSource::External, timestamp: None }, requires: [03, 02], provides: [04], data: [4]}" .to_owned() ); } diff --git a/substrate/client/transaction-pool/src/graph/listener.rs b/substrate/client/transaction-pool/src/graph/listener.rs index a5593920eec4..41daf5491f70 100644 --- a/substrate/client/transaction-pool/src/graph/listener.rs +++ b/substrate/client/transaction-pool/src/graph/listener.rs @@ -36,6 +36,7 @@ pub type DroppedByLimitsStream = TracingUnboundedReceiver { + /// Map containing per-transaction sinks for emitting transaction status events. watchers: HashMap>>, finality_watchers: LinkedHashMap, Vec>, @@ -119,32 +120,44 @@ impl Listener, limits_enforced: bool) { + /// Transaction was dropped from the pool because of enforcing the limit. + pub fn limit_enforced(&mut self, tx: &H) { + trace!(target: LOG_TARGET, "[{:?}] Dropped (limit enforced)", tx); + self.fire(tx, |watcher| watcher.limit_enforced()); + + if let Some(ref sink) = self.dropped_by_limits_sink { + if let Err(e) = sink.unbounded_send((tx.clone(), TransactionStatus::Dropped)) { + trace!(target: LOG_TARGET, "[{:?}] dropped_sink: send message failed: {:?}", tx, e); + } + } + } + + /// Transaction was replaced with another extrinsic. + pub fn usurped(&mut self, tx: &H, by: &H) { trace!(target: LOG_TARGET, "[{:?}] Dropped (replaced with {:?})", tx, by); - self.fire(tx, |watcher| match by { - Some(t) => watcher.usurped(t.clone()), - None => watcher.dropped(), - }); - - //note: LimitEnforced could be introduced as new status to get rid of this flag. - if limits_enforced { - if let Some(ref sink) = self.dropped_by_limits_sink { - if let Err(e) = sink.unbounded_send((tx.clone(), TransactionStatus::Dropped)) { - trace!(target: LOG_TARGET, "[{:?}] dropped_sink/future: send message failed: {:?}", tx, e); - } + self.fire(tx, |watcher| watcher.usurped(by.clone())); + + if let Some(ref sink) = self.dropped_by_limits_sink { + if let Err(e) = + sink.unbounded_send((tx.clone(), TransactionStatus::Usurped(by.clone()))) + { + trace!(target: LOG_TARGET, "[{:?}] dropped_sink: send message failed: {:?}", tx, e); + } } } + /// Transaction was dropped from the pool because of a failure during the resubmission of + /// revalidated transactions or a failure during tag pruning.
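+ ///
+ /// Unlike `limit_enforced` and `usurped` above, this method only notifies the
+ /// per-transaction watchers; no event is forwarded to the `dropped_by_limits_sink`.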
+ pub fn dropped(&mut self, tx: &H) { + trace!(target: LOG_TARGET, "[{:?}] Dropped", tx); + self.fire(tx, |watcher| watcher.dropped()); + } + /// Transaction was removed as invalid. pub fn invalid(&mut self, tx: &H) { trace!(target: LOG_TARGET, "[{:?}] Extrinsic invalid", tx); diff --git a/substrate/client/transaction-pool/src/graph/pool.rs b/substrate/client/transaction-pool/src/graph/pool.rs index 2dd8de352c6b..ff9cc1541af4 100644 --- a/substrate/client/transaction-pool/src/graph/pool.rs +++ b/substrate/client/transaction-pool/src/graph/pool.rs @@ -181,10 +181,8 @@ impl Pool { pub async fn submit_at( &self, at: &HashAndNumber, - source: TransactionSource, - xts: impl IntoIterator>, + xts: impl IntoIterator)>, ) -> Vec, B::Error>> { - let xts = xts.into_iter().map(|xt| (source, xt)); let validated_transactions = self.verify(at, xts, CheckBannedBeforeVerify::Yes).await; self.validated_pool.submit(validated_transactions.into_values()) } @@ -195,10 +193,8 @@ impl Pool { pub async fn resubmit_at( &self, at: &HashAndNumber, - source: TransactionSource, - xts: impl IntoIterator>, + xts: impl IntoIterator)>, ) -> Vec, B::Error>> { - let xts = xts.into_iter().map(|xt| (source, xt)); let validated_transactions = self.verify(at, xts, CheckBannedBeforeVerify::No).await; self.validated_pool.submit(validated_transactions.into_values()) } @@ -207,10 +203,10 @@ impl Pool { pub async fn submit_one( &self, at: &HashAndNumber, - source: TransactionSource, + source: base::TimedTransactionSource, xt: ExtrinsicFor, ) -> Result, B::Error> { - let res = self.submit_at(at, source, std::iter::once(xt)).await.pop(); + let res = self.submit_at(at, std::iter::once((source, xt))).await.pop(); res.expect("One extrinsic passed; one result returned; qed") } @@ -218,7 +214,7 @@ impl Pool { pub async fn submit_and_watch( &self, at: &HashAndNumber, - source: TransactionSource, + source: base::TimedTransactionSource, xt: ExtrinsicFor, ) -> Result, ExtrinsicHash>, B::Error> { let (_, tx) = self @@ -368,7 +364,7 @@ impl Pool { // Try to re-validate pruned transactions since some of them might be still valid. // note that `known_imported_hashes` will be rejected here due to temporary ban. 
let pruned_transactions = - prune_status.pruned.into_iter().map(|tx| (tx.source, tx.data.clone())); + prune_status.pruned.into_iter().map(|tx| (tx.source.clone(), tx.data.clone())); let reverified_transactions = self.verify(at, pruned_transactions, CheckBannedBeforeVerify::Yes).await; @@ -396,7 +392,7 @@ impl Pool { async fn verify( &self, at: &HashAndNumber, - xts: impl IntoIterator)>, + xts: impl IntoIterator)>, check: CheckBannedBeforeVerify, ) -> IndexMap, ValidatedTransactionFor> { let HashAndNumber { number, hash } = *at; @@ -417,7 +413,7 @@ impl Pool { &self, block_hash: ::Hash, block_number: NumberFor, - source: TransactionSource, + source: base::TimedTransactionSource, xt: ExtrinsicFor, check: CheckBannedBeforeVerify, ) -> (ExtrinsicHash, ValidatedTransactionFor) { @@ -431,7 +427,7 @@ impl Pool { let validation_result = self .validated_pool .api() - .validate_transaction(block_hash, source, xt.clone()) + .validate_transaction(block_hash, source.clone().into(), xt.clone()) .await; let status = match validation_result { @@ -488,6 +484,7 @@ mod tests { use super::{super::base_pool::Limit, *}; use crate::common::tests::{pool, uxt, TestApi, INVALID_NONCE}; use assert_matches::assert_matches; + use base::TimedTransactionSource; use codec::Encode; use futures::executor::block_on; use parking_lot::Mutex; @@ -495,9 +492,10 @@ mod tests { use sp_runtime::transaction_validity::TransactionSource; use std::{collections::HashMap, time::Instant}; use substrate_test_runtime::{AccountId, ExtrinsicBuilder, Transfer, H256}; - use substrate_test_runtime_client::AccountKeyring::{Alice, Bob}; + use substrate_test_runtime_client::Sr25519Keyring::{Alice, Bob}; - const SOURCE: TransactionSource = TransactionSource::External; + const SOURCE: TimedTransactionSource = + TimedTransactionSource { source: TransactionSource::External, timestamp: None }; #[test] fn should_validate_and_import_transaction() { @@ -545,8 +543,8 @@ mod tests { let initial_hashes = txs.iter().map(|t| api.hash_and_length(t).0).collect::>(); // when - let txs = txs.into_iter().map(|x| Arc::from(x)).collect::>(); - let hashes = block_on(pool.submit_at(&api.expect_hash_and_number(0), SOURCE, txs)); + let txs = txs.into_iter().map(|x| (SOURCE, Arc::from(x))).collect::>(); + let hashes = block_on(pool.submit_at(&api.expect_hash_and_number(0), txs)); log::debug!("--> {hashes:#?}"); // then diff --git a/substrate/client/transaction-pool/src/graph/ready.rs b/substrate/client/transaction-pool/src/graph/ready.rs index 860bcff0bace..9061d0e25581 100644 --- a/substrate/client/transaction-pool/src/graph/ready.rs +++ b/substrate/client/transaction-pool/src/graph/ready.rs @@ -589,7 +589,6 @@ fn remove_item(vec: &mut Vec, item: &T) { #[cfg(test)] mod tests { use super::*; - use sp_runtime::transaction_validity::TransactionSource as Source; fn tx(id: u8) -> Transaction> { Transaction { @@ -601,7 +600,7 @@ mod tests { requires: vec![vec![1], vec![2]], provides: vec![vec![3], vec![4]], propagate: true, - source: Source::External, + source: crate::TimedTransactionSource::new_external(false), } } @@ -711,7 +710,7 @@ mod tests { requires: vec![tx1.provides[0].clone()], provides: vec![], propagate: true, - source: Source::External, + source: crate::TimedTransactionSource::new_external(false), }; // when diff --git a/substrate/client/transaction-pool/src/graph/rotator.rs b/substrate/client/transaction-pool/src/graph/rotator.rs index 61a26fb4138c..9a2e269b5eed 100644 --- a/substrate/client/transaction-pool/src/graph/rotator.rs +++ 
b/substrate/client/transaction-pool/src/graph/rotator.rs @@ -106,7 +106,6 @@ impl PoolRotator { #[cfg(test)] mod tests { use super::*; - use sp_runtime::transaction_validity::TransactionSource; type Hash = u64; type Ex = (); @@ -126,7 +125,7 @@ mod tests { requires: vec![], provides: vec![], propagate: true, - source: TransactionSource::External, + source: crate::TimedTransactionSource::new_external(false), }; (hash, tx) @@ -192,7 +191,7 @@ mod tests { requires: vec![], provides: vec![], propagate: true, - source: TransactionSource::External, + source: crate::TimedTransactionSource::new_external(false), } } diff --git a/substrate/client/transaction-pool/src/graph/validated_pool.rs b/substrate/client/transaction-pool/src/graph/validated_pool.rs index d7f55198a40a..14df63d9673e 100644 --- a/substrate/client/transaction-pool/src/graph/validated_pool.rs +++ b/substrate/client/transaction-pool/src/graph/validated_pool.rs @@ -30,7 +30,7 @@ use serde::Serialize; use sp_blockchain::HashAndNumber; use sp_runtime::{ traits::{self, SaturatedConversion}, - transaction_validity::{TransactionSource, TransactionTag as Tag, ValidTransaction}, + transaction_validity::{TransactionTag as Tag, ValidTransaction}, }; use std::time::Instant; @@ -62,7 +62,7 @@ impl ValidatedTransaction { pub fn valid_at( at: u64, hash: Hash, - source: TransactionSource, + source: base::TimedTransactionSource, data: Ex, bytes: usize, validity: ValidTransaction, @@ -280,7 +280,7 @@ impl ValidatedPool { // run notifications let mut listener = self.listener.write(); for h in &removed { - listener.dropped(h, None, true); + listener.limit_enforced(h); } removed @@ -453,7 +453,7 @@ impl ValidatedPool { match final_status { Status::Future => listener.future(&hash), Status::Ready => listener.ready(&hash, None), - Status::Dropped => listener.dropped(&hash, None, false), + Status::Dropped => listener.dropped(&hash), Status::Failed => listener.invalid(&hash), } } @@ -492,7 +492,7 @@ impl ValidatedPool { fire_events(&mut *listener, promoted); } for f in &status.failed { - listener.dropped(f, None, false); + listener.dropped(f); } } @@ -671,6 +671,21 @@ impl ValidatedPool { ) -> super::listener::DroppedByLimitsStream, BlockHash> { self.listener.write().create_dropped_by_limits_stream() } + + /// Resends ready and future events for all the ready and future transactions that are already + /// in the pool. + /// + /// Intended to be called after cloning the instance of `ValidatedPool`. 
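+ ///
+ /// Call-site sketch (illustrative only; `cloned_pool` stands for a freshly cloned
+ /// `ValidatedPool` instance):
+ /// ```ignore
+ /// // Watchers registered on the clone have not seen any events yet,
+ /// // so replay `Ready`/`Future` for everything already in the pool:
+ /// cloned_pool.retrigger_notifications();
+ /// ```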
+ pub fn retrigger_notifications(&self) { + let pool = self.pool.read(); + let mut listener = self.listener.write(); + pool.ready().for_each(|r| { + listener.ready(&r.hash, None); + }); + pool.futures().for_each(|f| { + listener.future(&f.hash); + }); + } } fn fire_events(listener: &mut Listener, imported: &base::Imported) @@ -682,7 +697,7 @@ where base::Imported::Ready { ref promoted, ref failed, ref removed, ref hash } => { listener.ready(hash, None); failed.iter().for_each(|f| listener.invalid(f)); - removed.iter().for_each(|r| listener.dropped(&r.hash, Some(hash), false)); + removed.iter().for_each(|r| listener.usurped(&r.hash, hash)); promoted.iter().for_each(|p| listener.ready(p, None)); }, base::Imported::Future { ref hash } => listener.future(hash), diff --git a/substrate/client/transaction-pool/src/graph/watcher.rs b/substrate/client/transaction-pool/src/graph/watcher.rs index fb7cf99d4dc6..2fd31e772fd8 100644 --- a/substrate/client/transaction-pool/src/graph/watcher.rs +++ b/substrate/client/transaction-pool/src/graph/watcher.rs @@ -113,6 +113,12 @@ impl Sender { } /// Transaction has been dropped from the pool because of the limit. + pub fn limit_enforced(&mut self) { + self.send(TransactionStatus::Dropped); + self.is_finalized = true; + } + + /// Transaction has been dropped from the pool. pub fn dropped(&mut self) { self.send(TransactionStatus::Dropped); self.is_finalized = true; diff --git a/substrate/client/transaction-pool/src/lib.rs b/substrate/client/transaction-pool/src/lib.rs index 3d3d596c291f..366d91a973d2 100644 --- a/substrate/client/transaction-pool/src/lib.rs +++ b/substrate/client/transaction-pool/src/lib.rs @@ -36,7 +36,10 @@ pub use api::FullChainApi; pub use builder::{Builder, TransactionPoolHandle, TransactionPoolOptions, TransactionPoolType}; pub use common::notification_future; pub use fork_aware_txpool::{ForkAwareTxPool, ForkAwareTxPoolTask}; -pub use graph::{base_pool::Limit as PoolLimit, ChainApi, Options, Pool}; +pub use graph::{ + base_pool::{Limit as PoolLimit, TimedTransactionSource}, + ChainApi, Options, Pool, +}; use single_state_txpool::prune_known_txs_for_block; pub use single_state_txpool::{BasicPool, RevalidationType}; pub use transaction_pool_wrapper::TransactionPoolWrapper; diff --git a/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs b/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs index 5ef726c9f7d3..f22fa2ddabde 100644 --- a/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs +++ b/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs @@ -88,7 +88,7 @@ async fn batch_revalidate( let validation_results = futures::future::join_all(batch.into_iter().filter_map(|ext_hash| { pool.validated_pool().ready_by_hash(&ext_hash).map(|ext| { - api.validate_transaction(at, ext.source, ext.data.clone()) + api.validate_transaction(at, ext.source.clone().into(), ext.data.clone()) .map(move |validation_result| (validation_result, ext_hash, ext)) }) })) @@ -121,7 +121,7 @@ async fn batch_revalidate( ValidatedTransaction::valid_at( block_number.saturated_into::(), ext_hash, - ext.source, + ext.source.clone(), ext.data.clone(), api.hash_and_length(&ext.data).1, validity, @@ -375,11 +375,11 @@ mod tests { use crate::{ common::tests::{uxt, TestApi}, graph::Pool, + TimedTransactionSource, }; use futures::executor::block_on; - use sc_transaction_pool_api::TransactionSource; use substrate_test_runtime::{AccountId, Transfer, H256}; - use 
substrate_test_runtime_client::AccountKeyring::{Alice, Bob}; + use substrate_test_runtime_client::Sr25519Keyring::{Alice, Bob}; #[test] fn revalidation_queue_works() { @@ -398,7 +398,7 @@ mod tests { let uxt_hash = block_on(pool.submit_one( &han_of_block0, - TransactionSource::External, + TimedTransactionSource::new_external(false), uxt.clone().into(), )) .expect("Should be valid"); @@ -433,14 +433,15 @@ mod tests { let han_of_block0 = api.expect_hash_and_number(0); let unknown_block = H256::repeat_byte(0x13); - let uxt_hashes = block_on(pool.submit_at( - &han_of_block0, - TransactionSource::External, - vec![uxt0.into(), uxt1.into()], - )) - .into_iter() - .map(|r| r.expect("Should be valid")) - .collect::>(); + let source = TimedTransactionSource::new_external(false); + let uxt_hashes = + block_on(pool.submit_at( + &han_of_block0, + vec![(source.clone(), uxt0.into()), (source, uxt1.into())], + )) + .into_iter() + .map(|r| r.expect("Should be valid")) + .collect::>(); assert_eq!(api.validation_requests().len(), 2); assert_eq!(pool.validated_pool().status().ready, 2); diff --git a/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs b/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs index b29630b563bb..e7504012ca67 100644 --- a/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs +++ b/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs @@ -29,7 +29,7 @@ use crate::{ error, log_xt::log_xt_trace, }, - graph::{self, ExtrinsicHash, IsValidator}, + graph::{self, base_pool::TimedTransactionSource, ExtrinsicHash, IsValidator}, ReadyIteratorFor, LOG_TARGET, }; use async_trait::async_trait; @@ -254,14 +254,19 @@ where xts: Vec>, ) -> Result, Self::Error>>, Self::Error> { let pool = self.pool.clone(); - let xts = xts.into_iter().map(Arc::from).collect::>(); + let xts = xts + .into_iter() + .map(|xt| { + (TimedTransactionSource::from_transaction_source(source, false), Arc::from(xt)) + }) + .collect::>(); self.metrics .report(|metrics| metrics.submitted_transactions.inc_by(xts.len() as u64)); let number = self.api.resolve_block_number(at); let at = HashAndNumber { hash: at, number: number? }; - Ok(pool.submit_at(&at, source, xts).await) + Ok(pool.submit_at(&at, xts).await) } async fn submit_one( @@ -277,7 +282,8 @@ where let number = self.api.resolve_block_number(at); let at = HashAndNumber { hash: at, number: number? }; - pool.submit_one(&at, source, xt).await + pool.submit_one(&at, TimedTransactionSource::from_transaction_source(source, false), xt) + .await } async fn submit_and_watch( @@ -294,7 +300,13 @@ where let number = self.api.resolve_block_number(at); let at = HashAndNumber { hash: at, number: number? 
}; - let watcher = pool.submit_and_watch(&at, source, xt).await?; + let watcher = pool + .submit_and_watch( + &at, + TimedTransactionSource::from_transaction_source(source, false), + xt, + ) + .await?; Ok(watcher.into_stream().boxed()) } @@ -458,7 +470,7 @@ where let validated = ValidatedTransaction::valid_at( block_number.saturated_into::(), hash, - TransactionSource::Local, + TimedTransactionSource::new_local(false), Arc::from(xt), bytes, validity, @@ -662,8 +674,8 @@ where resubmit_transactions.extend( //todo: arctx - we need to get ref from somewhere - block_transactions.into_iter().map(Arc::from).filter(|tx| { - let tx_hash = pool.hash_of(tx); + block_transactions.into_iter().map(Arc::from).filter_map(|tx| { + let tx_hash = pool.hash_of(&tx); let contains = pruned_log.contains(&tx_hash); // need to count all transactions, not just filtered, here @@ -676,8 +688,15 @@ where tx_hash, hash, ); + Some(( + // These transactions are coming from retracted blocks, we should + // simply consider them external. + TimedTransactionSource::new_external(false), + tx, + )) + } else { + None } - !contains }), ); @@ -686,14 +705,7 @@ where }); } - pool.resubmit_at( - &hash_and_number, - // These transactions are coming from retracted blocks, we should - // simply consider them external. - TransactionSource::External, - resubmit_transactions, - ) - .await; + pool.resubmit_at(&hash_and_number, resubmit_transactions).await; } let extra_pool = pool.clone(); diff --git a/substrate/client/transaction-pool/tests/fatp.rs b/substrate/client/transaction-pool/tests/fatp.rs index 9f343a9bd029..8bf08122995c 100644 --- a/substrate/client/transaction-pool/tests/fatp.rs +++ b/substrate/client/transaction-pool/tests/fatp.rs @@ -30,7 +30,7 @@ use sc_transaction_pool_api::{ }; use sp_runtime::transaction_validity::InvalidTransaction; use std::{sync::Arc, time::Duration}; -use substrate_test_runtime_client::AccountKeyring::*; +use substrate_test_runtime_client::Sr25519Keyring::*; use substrate_test_runtime_transaction_pool::uxt; pub mod fatp_common; @@ -2267,19 +2267,13 @@ fn fatp_avoid_stuck_transaction() { assert_pool_status!(header06.hash(), &pool, 0, 0); - // Import enough blocks to make xt4i revalidated - let mut prev_header = header03; - // wait 10 blocks for revalidation - for n in 7..=11 { - let header = api.push_block(n, vec![], true); - let event = finalized_block_event(&pool, prev_header.hash(), header.hash()); - block_on(pool.maintain(event)); - prev_header = header; - } + let header07 = api.push_block(7, vec![], true); + let event = finalized_block_event(&pool, header03.hash(), header07.hash()); + block_on(pool.maintain(event)); let xt4i_events = futures::executor::block_on_stream(xt4i_watcher).collect::>(); log::debug!("xt4i_events: {:#?}", xt4i_events); - assert_eq!(xt4i_events, vec![TransactionStatus::Future, TransactionStatus::Invalid]); + assert_eq!(xt4i_events, vec![TransactionStatus::Future, TransactionStatus::Dropped]); assert_eq!(pool.mempool_len(), (0, 0)); } diff --git a/substrate/client/transaction-pool/tests/fatp_common/mod.rs b/substrate/client/transaction-pool/tests/fatp_common/mod.rs index 15f2b7f79c14..aaffebc0db0a 100644 --- a/substrate/client/transaction-pool/tests/fatp_common/mod.rs +++ b/substrate/client/transaction-pool/tests/fatp_common/mod.rs @@ -24,7 +24,7 @@ use sp_runtime::transaction_validity::TransactionSource; use std::sync::Arc; use substrate_test_runtime_client::{ runtime::{Block, Hash, Header}, - AccountKeyring::*, + Sr25519Keyring::*, }; use 
substrate_test_runtime_transaction_pool::{uxt, TestApi}; pub const LOG_TARGET: &str = "txpool"; @@ -201,6 +201,20 @@ macro_rules! assert_ready_iterator { }}; } +#[macro_export] +macro_rules! assert_future_iterator { + ($hash:expr, $pool:expr, [$( $xt:expr ),*]) => {{ + let futures = $pool.futures_at($hash).unwrap(); + let expected = vec![ $($pool.api().hash_and_length(&$xt).0),*]; + log::debug!(target:LOG_TARGET, "expected: {:#?}", expected); + log::debug!(target:LOG_TARGET, "output: {:#?}", futures); + assert_eq!(expected.len(), futures.len()); + let hsf = futures.iter().map(|a| a.hash).collect::>(); + let hse = expected.into_iter().collect::>(); + assert_eq!(hse,hsf); + }}; +} + pub const SOURCE: TransactionSource = TransactionSource::External; #[cfg(test)] diff --git a/substrate/client/transaction-pool/tests/fatp_limits.rs b/substrate/client/transaction-pool/tests/fatp_limits.rs index 03792fd89dfa..fb02b21ebc2b 100644 --- a/substrate/client/transaction-pool/tests/fatp_limits.rs +++ b/substrate/client/transaction-pool/tests/fatp_limits.rs @@ -29,7 +29,7 @@ use sc_transaction_pool_api::{ error::Error as TxPoolError, MaintainedTransactionPool, TransactionPool, TransactionStatus, }; use std::thread::sleep; -use substrate_test_runtime_client::AccountKeyring::*; +use substrate_test_runtime_client::Sr25519Keyring::*; use substrate_test_runtime_transaction_pool::uxt; #[test] @@ -641,3 +641,192 @@ fn fatp_limits_future_size_works() { assert_pool_status!(header01.hash(), &pool, 0, 3); assert_eq!(pool.mempool_len().0, 3); } + +#[test] +fn fatp_limits_watcher_ready_transactions_are_not_dropped_when_view_is_dropped() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(6).with_ready_count(2).build(); + api.set_nonce(api.genesis_hash(), Bob.into(), 300); + api.set_nonce(api.genesis_hash(), Charlie.into(), 400); + api.set_nonce(api.genesis_hash(), Dave.into(), 500); + api.set_nonce(api.genesis_hash(), Eve.into(), 600); + api.set_nonce(api.genesis_hash(), Ferdie.into(), 700); + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Bob, 300); + let xt2 = uxt(Charlie, 400); + + let xt3 = uxt(Dave, 500); + let xt4 = uxt(Eve, 600); + let xt5 = uxt(Ferdie, 700); + + let _xt0_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let _xt1_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + + assert_pool_status!(header01.hash(), &pool, 2, 0); + assert_eq!(pool.mempool_len().1, 2); + + let header02 = api.push_block_with_parent(header01.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); + + let _xt2_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + let _xt3_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); + + assert_pool_status!(header02.hash(), &pool, 2, 0); + assert_eq!(pool.mempool_len().1, 4); + + let header03 = api.push_block_with_parent(header02.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header02.hash()), header03.hash()))); + + let _xt4_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4.clone())).unwrap(); + let _xt5_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE,
xt5.clone())).unwrap(); + + assert_pool_status!(header03.hash(), &pool, 2, 0); + assert_eq!(pool.mempool_len().1, 6); + + let header04 = + api.push_block_with_parent(header03.hash(), vec![xt4.clone(), xt5.clone()], true); + api.set_nonce(header04.hash(), Alice.into(), 201); + api.set_nonce(header04.hash(), Bob.into(), 301); + api.set_nonce(header04.hash(), Charlie.into(), 401); + api.set_nonce(header04.hash(), Dave.into(), 501); + api.set_nonce(header04.hash(), Eve.into(), 601); + api.set_nonce(header04.hash(), Ferdie.into(), 701); + block_on(pool.maintain(new_best_block_event(&pool, Some(header03.hash()), header04.hash()))); + + assert_ready_iterator!(header01.hash(), pool, [xt0, xt1]); + assert_ready_iterator!(header02.hash(), pool, [xt2, xt3]); + assert_ready_iterator!(header03.hash(), pool, [xt4, xt5]); + assert_ready_iterator!(header04.hash(), pool, []); + + block_on(pool.maintain(finalized_block_event(&pool, api.genesis_hash(), header01.hash()))); + assert!(!pool.status_all().contains_key(&header01.hash())); + + block_on(pool.maintain(finalized_block_event(&pool, header01.hash(), header02.hash()))); + assert!(!pool.status_all().contains_key(&header02.hash())); + + //view 01 was dropped + assert!(pool.ready_at(header01.hash()).now_or_never().is_none()); + assert_eq!(pool.mempool_len().1, 6); + + block_on(pool.maintain(finalized_block_event(&pool, header02.hash(), header03.hash()))); + + //no revalidation has happened yet, all txs are kept + assert_eq!(pool.mempool_len().1, 6); + + //view 03 is still there + assert!(!pool.status_all().contains_key(&header03.hash())); + + //view 02 was dropped + assert!(pool.ready_at(header02.hash()).now_or_never().is_none()); + + let mut prev_header = header03; + for n in 5..=11 { + let header = api.push_block(n, vec![], true); + let event = finalized_block_event(&pool, prev_header.hash(), header.hash()); + block_on(pool.maintain(event)); + prev_header = header; + } + + //now revalidation has happened, all txs are dropped + assert_eq!(pool.mempool_len().1, 0); +} + +#[test] +fn fatp_limits_watcher_future_transactions_are_dropped_when_view_is_dropped() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(6).with_future_count(2).build(); + api.set_nonce(api.genesis_hash(), Bob.into(), 300); + api.set_nonce(api.genesis_hash(), Charlie.into(), 400); + api.set_nonce(api.genesis_hash(), Dave.into(), 500); + api.set_nonce(api.genesis_hash(), Eve.into(), 600); + api.set_nonce(api.genesis_hash(), Ferdie.into(), 700); + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 201); + let xt1 = uxt(Bob, 301); + let xt2 = uxt(Charlie, 401); + + let xt3 = uxt(Dave, 501); + let xt4 = uxt(Eve, 601); + let xt5 = uxt(Ferdie, 701); + + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + + assert_pool_status!(header01.hash(), &pool, 0, 2); + assert_eq!(pool.mempool_len().1, 2); + assert_future_iterator!(header01.hash(), pool, [xt0, xt1]); + + let header02 = api.push_block_with_parent(header01.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); + + let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + let xt3_watcher =
block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); + + assert_pool_status!(header02.hash(), &pool, 0, 2); + assert_eq!(pool.mempool_len().1, 4); + assert_future_iterator!(header02.hash(), pool, [xt2, xt3]); + + let header03 = api.push_block_with_parent(header02.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header02.hash()), header03.hash()))); + + let xt4_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4.clone())).unwrap(); + let xt5_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt5.clone())).unwrap(); + + assert_pool_status!(header03.hash(), &pool, 0, 2); + assert_eq!(pool.mempool_len().1, 6); + assert_future_iterator!(header03.hash(), pool, [xt4, xt5]); + + let header04 = api.push_block_with_parent(header03.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header03.hash()), header04.hash()))); + + assert_pool_status!(header04.hash(), &pool, 0, 2); + assert_eq!(pool.futures().len(), 2); + assert_future_iterator!(header04.hash(), pool, [xt4, xt5]); + + block_on(pool.maintain(finalized_block_event(&pool, api.genesis_hash(), header04.hash()))); + assert_eq!(pool.active_views_count(), 1); + assert_eq!(pool.inactive_views_count(), 0); + //todo: can we do better? We don't have API to check if event was processed internally. + let mut counter = 0; + while pool.mempool_len().1 != 2 { + sleep(std::time::Duration::from_millis(1)); + counter = counter + 1; + if counter > 20 { + assert!(false, "timeout {}", pool.mempool_len().1); + } + } + assert_eq!(pool.mempool_len().1, 2); + assert_pool_status!(header04.hash(), &pool, 0, 2); + assert_eq!(pool.futures().len(), 2); + + let to_be_checked = vec![xt0_watcher, xt1_watcher, xt2_watcher, xt3_watcher]; + for x in to_be_checked { + let x_status = futures::executor::block_on_stream(x).take(2).collect::>(); + assert_eq!(x_status, vec![TransactionStatus::Future, TransactionStatus::Dropped]); + } + + let to_be_checked = vec![xt4_watcher, xt5_watcher]; + for x in to_be_checked { + let x_status = futures::executor::block_on_stream(x).take(1).collect::>(); + assert_eq!(x_status, vec![TransactionStatus::Future]); + } +} diff --git a/substrate/client/transaction-pool/tests/fatp_prios.rs b/substrate/client/transaction-pool/tests/fatp_prios.rs new file mode 100644 index 000000000000..4ed9b4503861 --- /dev/null +++ b/substrate/client/transaction-pool/tests/fatp_prios.rs @@ -0,0 +1,249 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Tests of priorities for fork-aware transaction pool. 
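+//!
+//! The tests below follow a common pattern (summarized here for orientation): two
+//! transactions from the same sender with the same nonce but different `priority`
+//! values are submitted, and the higher-priority one is expected to evict the
+//! lower-priority one, which its watcher observes as `TransactionStatus::Usurped(..)`.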
+ +pub mod fatp_common; + +use fatp_common::{new_best_block_event, TestPoolBuilder, LOG_TARGET, SOURCE}; +use futures::{executor::block_on, FutureExt}; +use sc_transaction_pool::ChainApi; +use sc_transaction_pool_api::{MaintainedTransactionPool, TransactionPool, TransactionStatus}; +use substrate_test_runtime_client::Sr25519Keyring::*; +use substrate_test_runtime_transaction_pool::uxt; + +#[test] +fn fatp_prio_ready_higher_evicts_lower() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(3).with_ready_count(2).build(); + + let header01 = api.push_block(1, vec![], true); + + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Alice, 200); + + api.set_priority(&xt0, 2); + api.set_priority(&xt1, 3); + + let result0 = block_on(pool.submit_one(header01.hash(), SOURCE, xt0.clone())); + let result1 = block_on(pool.submit_one(header01.hash(), SOURCE, xt1.clone())); + + log::info!("r0 => {:?}", result0); + log::info!("r1 => {:?}", result1); + log::info!("len: {:?}", pool.mempool_len()); + log::info!("len: {:?}", pool.status_all()[&header01.hash()]); + assert_ready_iterator!(header01.hash(), pool, [xt1]); + assert_pool_status!(header01.hash(), &pool, 1, 0); +} + +#[test] +fn fatp_prio_watcher_ready_higher_evicts_lower() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(3).with_ready_count(2).build(); + + let header01 = api.push_block(1, vec![], true); + + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Alice, 200); + + api.set_priority(&xt0, 2); + api.set_priority(&xt1, 3); + + let xt0_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt0.clone())).unwrap(); + let xt1_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt1.clone())).unwrap(); + + let xt0_status = futures::executor::block_on_stream(xt0_watcher).take(2).collect::>(); + assert_eq!( + xt0_status, + vec![TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt1).0)] + ); + let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(1).collect::>(); + assert_eq!(xt1_status, vec![TransactionStatus::Ready]); + + log::info!("len: {:?}", pool.mempool_len()); + log::info!("len: {:?}", pool.status_all()[&header01.hash()]); + assert_ready_iterator!(header01.hash(), pool, [xt1]); + assert_pool_status!(header01.hash(), &pool, 1, 0); +} + +#[test] +fn fatp_prio_watcher_future_higher_evicts_lower() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(3).with_ready_count(3).build(); + + let header01 = api.push_block(1, vec![], true); + + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 201); + let xt1 = uxt(Alice, 201); + let xt2 = uxt(Alice, 200); + + api.set_priority(&xt0, 2); + api.set_priority(&xt1, 3); + + let xt0_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt0.clone())).unwrap(); + let xt1_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt1.clone())).unwrap(); + let xt2_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt2.clone())).unwrap(); + + let xt0_status = 
futures::executor::block_on_stream(xt0_watcher).take(2).collect::>(); + + assert_eq!( + xt0_status, + vec![TransactionStatus::Future, TransactionStatus::Usurped(api.hash_and_length(&xt2).0)] + ); + let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(2).collect::>(); + assert_eq!(xt1_status, vec![TransactionStatus::Future, TransactionStatus::Ready]); + let xt2_status = futures::executor::block_on_stream(xt2_watcher).take(1).collect::>(); + assert_eq!(xt2_status, vec![TransactionStatus::Ready]); + + assert_eq!(pool.mempool_len().1, 2); + assert_ready_iterator!(header01.hash(), pool, [xt2, xt1]); + assert_pool_status!(header01.hash(), &pool, 2, 0); +} + +#[test] +fn fatp_prio_watcher_ready_lower_prio_gets_dropped_from_all_views() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(3).with_ready_count(2).build(); + + let header01 = api.push_block(1, vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, None, header01.hash()))); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Alice, 200); + + api.set_priority(&xt0, 2); + api.set_priority(&xt1, 3); + + let xt0_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt0.clone())).unwrap(); + + let header02 = api.push_block_with_parent(header01.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); + + let header03a = api.push_block_with_parent(header02.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header03a.hash()))); + + let header03b = api.push_block_with_parent(header02.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header03a.hash()), header03b.hash()))); + + assert_pool_status!(header03a.hash(), &pool, 1, 0); + assert_ready_iterator!(header03a.hash(), pool, [xt0]); + assert_pool_status!(header03b.hash(), &pool, 1, 0); + assert_ready_iterator!(header03b.hash(), pool, [xt0]); + assert_ready_iterator!(header01.hash(), pool, [xt0]); + assert_ready_iterator!(header02.hash(), pool, [xt0]); + + let xt1_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt1.clone())).unwrap(); + + let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(1).collect::>(); + assert_eq!(xt1_status, vec![TransactionStatus::Ready]); + let xt0_status = futures::executor::block_on_stream(xt0_watcher).take(2).collect::>(); + assert_eq!( + xt0_status, + vec![TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt1).0)] + ); + assert_ready_iterator!(header03a.hash(), pool, [xt1]); + assert_ready_iterator!(header03b.hash(), pool, [xt1]); + assert_ready_iterator!(header01.hash(), pool, [xt1]); + assert_ready_iterator!(header02.hash(), pool, [xt1]); +} + +#[test] +fn fatp_prio_watcher_future_lower_prio_gets_dropped_from_all_views() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(3).with_ready_count(2).build(); + + let header01 = api.push_block(1, vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, None, header01.hash()))); + + let xt0 = uxt(Alice, 201); + let xt1 = uxt(Alice, 201); + let xt2 = uxt(Alice, 200); + + api.set_priority(&xt0, 2); + api.set_priority(&xt1, 3); + + let xt0_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt0.clone())).unwrap(); + + let xt1_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, 
xt1.clone())).unwrap(); + + let header02 = api.push_block_with_parent(header01.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); + + let header03a = api.push_block_with_parent(header02.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header03a.hash()))); + + let header03b = api.push_block_with_parent(header02.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header03a.hash()), header03b.hash()))); + + assert_pool_status!(header03a.hash(), &pool, 0, 2); + assert_future_iterator!(header03a.hash(), pool, [xt0, xt1]); + assert_pool_status!(header03b.hash(), &pool, 0, 2); + assert_future_iterator!(header03b.hash(), pool, [xt0, xt1]); + assert_future_iterator!(header01.hash(), pool, [xt0, xt1]); + assert_future_iterator!(header02.hash(), pool, [xt0, xt1]); + + let xt2_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt2.clone())).unwrap(); + + let xt2_status = futures::executor::block_on_stream(xt2_watcher).take(1).collect::>(); + assert_eq!(xt2_status, vec![TransactionStatus::Ready]); + let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(1).collect::>(); + assert_eq!(xt1_status, vec![TransactionStatus::Future]); + let xt0_status = futures::executor::block_on_stream(xt0_watcher).take(2).collect::>(); + assert_eq!( + xt0_status, + vec![TransactionStatus::Future, TransactionStatus::Usurped(api.hash_and_length(&xt2).0)] + ); + assert_future_iterator!(header03a.hash(), pool, []); + assert_future_iterator!(header03b.hash(), pool, []); + assert_future_iterator!(header01.hash(), pool, []); + assert_future_iterator!(header02.hash(), pool, []); + + assert_ready_iterator!(header03a.hash(), pool, [xt2, xt1]); + assert_ready_iterator!(header03b.hash(), pool, [xt2, xt1]); + assert_ready_iterator!(header01.hash(), pool, [xt2, xt1]); + assert_ready_iterator!(header02.hash(), pool, [xt2, xt1]); +} diff --git a/substrate/client/transaction-pool/tests/pool.rs b/substrate/client/transaction-pool/tests/pool.rs index ed0fd7d4e655..20997606c607 100644 --- a/substrate/client/transaction-pool/tests/pool.rs +++ b/substrate/client/transaction-pool/tests/pool.rs @@ -40,8 +40,8 @@ use sp_runtime::{ use std::{collections::BTreeSet, pin::Pin, sync::Arc}; use substrate_test_runtime_client::{ runtime::{Block, Extrinsic, ExtrinsicBuilder, Hash, Header, Nonce, Transfer, TransferData}, - AccountKeyring::*, ClientBlockImportExt, + Sr25519Keyring::*, }; use substrate_test_runtime_transaction_pool::{uxt, TestApi}; @@ -80,12 +80,14 @@ fn create_basic_pool(test_api: TestApi) -> BasicPool { create_basic_pool_with_genesis(Arc::from(test_api)).0 } +const TSOURCE: TimedTransactionSource = + TimedTransactionSource { source: TransactionSource::External, timestamp: None }; const SOURCE: TransactionSource = TransactionSource::External; #[test] fn submission_should_work() { let (pool, api) = pool(); - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 209).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 209).into())) .unwrap(); let pending: Vec<_> = pool @@ -99,9 +101,9 @@ fn submission_should_work() { #[test] fn multiple_submission_should_work() { let (pool, api) = pool(); - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 209).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 209).into())) .unwrap(); - 
block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 210).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 210).into())) .unwrap(); let pending: Vec<_> = pool @@ -116,7 +118,7 @@ fn multiple_submission_should_work() { fn early_nonce_should_be_culled() { sp_tracing::try_init_simple(); let (pool, api) = pool(); - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 208).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 208).into())) .unwrap(); log::debug!("-> {:?}", pool.validated_pool().status()); @@ -132,7 +134,7 @@ fn late_nonce_should_be_queued() { let (pool, api) = pool(); - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 210).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 210).into())) .unwrap(); let pending: Vec<_> = pool .validated_pool() @@ -141,7 +143,7 @@ .collect(); assert_eq!(pending, Vec::<Hash>::new()); - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 209).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 209).into())) .unwrap(); let pending: Vec<_> = pool .validated_pool() @@ -155,9 +157,9 @@ fn prune_tags_should_work() { let (pool, api) = pool(); let hash209 = - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 209).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 209).into())) .unwrap(); - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 210).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 210).into())) .unwrap(); let pending: Vec<_> = pool @@ -183,9 +185,9 @@ fn should_ban_invalid_transactions() { let (pool, api) = pool(); let uxt = Arc::from(uxt(Alice, 209)); let hash = - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.clone())).unwrap(); + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())).unwrap(); pool.validated_pool().remove_invalid(&[hash]); - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.clone())).unwrap_err(); + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())).unwrap_err(); // when let pending: Vec<_> = pool @@ -196,7 +198,7 @@ assert_eq!(pending, Vec::<Hash>::new()); // then - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.clone())).unwrap_err(); + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())).unwrap_err(); } #[test] @@ -224,7 +226,7 @@ fn should_correctly_prune_transactions_providing_more_than_one_tag() { })); let pool = Pool::new(Default::default(), true.into(), api.clone()); let xt0 = Arc::from(uxt(Alice, 209)); - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, xt0.clone())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, xt0.clone())) .expect("1. 
Imported"); assert_eq!(pool.validated_pool().status().ready, 1); assert_eq!(api.validation_requests().len(), 1); @@ -242,7 +244,7 @@ fn should_correctly_prune_transactions_providing_more_than_one_tag() { api.increment_nonce(Alice.into()); api.push_block(2, Vec::new(), true); let xt1 = uxt(Alice, 211); - block_on(pool.submit_one(&api.expect_hash_and_number(2), SOURCE, xt1.clone().into())) + block_on(pool.submit_one(&api.expect_hash_and_number(2), TSOURCE, xt1.clone().into())) .expect("2. Imported"); assert_eq!(api.validation_requests().len(), 3); assert_eq!(pool.validated_pool().status().ready, 1); diff --git a/substrate/docs/Upgrading-2.0-to-3.0.md b/substrate/docs/Upgrading-2.0-to-3.0.md index 1be41a34ef34..f6fc5cf4b079 100644 --- a/substrate/docs/Upgrading-2.0-to-3.0.md +++ b/substrate/docs/Upgrading-2.0-to-3.0.md @@ -1003,7 +1003,7 @@ modified your chain you should probably try to apply these patches: }; use sp_timestamp; - use sp_finality_tracker; - use sp_keyring::AccountKeyring; + use sp_keyring::Sr25519Keyring; use sc_service_test::TestNetNode; use crate::service::{new_full_base, new_light_base, NewFullBase}; - use sp_runtime::traits::IdentifyAccount; @@ -1034,7 +1034,7 @@ modified your chain you should probably try to apply these patches: + let mut slot = 1u64; // For the extrinsics factory - let bob = Arc::new(AccountKeyring::Bob.pair()); + let bob = Arc::new(Sr25519Keyring::Bob.pair()); @@ -528,14 +539,13 @@ mod tests { Ok((node, (inherent_data_providers, setup_handles.unwrap()))) }, diff --git a/substrate/frame/bounties/src/benchmarking.rs b/substrate/frame/bounties/src/benchmarking.rs index 8ad85d5420ed..1e931958898d 100644 --- a/substrate/frame/bounties/src/benchmarking.rs +++ b/substrate/frame/bounties/src/benchmarking.rs @@ -25,7 +25,7 @@ use alloc::{vec, vec::Vec}; use frame_benchmarking::v1::{ account, benchmarks_instance_pallet, whitelisted_caller, BenchmarkError, }; -use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; +use frame_system::{pallet_prelude::BlockNumberFor as SystemBlockNumberFor, RawOrigin}; use sp_runtime::traits::{BlockNumberProvider, Bounded}; use crate::Pallet as Bounties; @@ -33,7 +33,7 @@ use pallet_treasury::Pallet as Treasury; const SEED: u32 = 0; -fn set_block_number, I: 'static>(n: BlockNumberFor) { +fn set_block_number, I: 'static>(n: BlockNumberFor) { >::BlockNumberProvider::set_block_number(n); } @@ -132,7 +132,7 @@ benchmarks_instance_pallet! 
{ Bounties::<T, I>::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; let bounty_id = BountyCount::<T, I>::get() - 1; let approve_origin = T::SpendOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - Treasury::<T, I>::on_initialize(BlockNumberFor::<T>::zero()); + Treasury::<T, I>::on_initialize(SystemBlockNumberFor::<T>::zero()); }: _(approve_origin, bounty_id, curator_lookup, fee) verify { assert_last_event::<T, I>( diff --git a/substrate/frame/bounties/src/lib.rs b/substrate/frame/bounties/src/lib.rs index 3ed408a19120..729c76b5cc75 100644 --- a/substrate/frame/bounties/src/lib.rs +++ b/substrate/frame/bounties/src/lib.rs @@ -105,7 +105,9 @@ use sp_runtime::{ use frame_support::{dispatch::DispatchResultWithPostInfo, traits::EnsureOrigin}; use frame_support::pallet_prelude::*; -use frame_system::pallet_prelude::*; +use frame_system::pallet_prelude::{ + ensure_signed, BlockNumberFor as SystemBlockNumberFor, OriginFor, +}; use scale_info::TypeInfo; pub use weights::WeightInfo; @@ -120,6 +122,9 @@ pub type BountyIndex = u32; type AccountIdLookupOf<T> = <<T as frame_system::Config>::Lookup as StaticLookup>::Source; +type BlockNumberFor<T, I = ()> = + <<T as pallet_treasury::Config<I>>::BlockNumberProvider as BlockNumberProvider>::BlockNumber; + /// A bounty proposal. #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct Bounty<AccountId, Balance, BlockNumber> { @@ -213,11 +218,11 @@ pub mod pallet { /// The delay period for which a bounty beneficiary need to wait before claim the payout. #[pallet::constant] - type BountyDepositPayoutDelay: Get<BlockNumberFor<T>>; + type BountyDepositPayoutDelay: Get<BlockNumberFor<T, I>>; /// Bounty duration in blocks. #[pallet::constant] - type BountyUpdatePeriod: Get<BlockNumberFor<T>>; + type BountyUpdatePeriod: Get<BlockNumberFor<T, I>>; /// The curator deposit is calculated as a percentage of the curator fee. /// @@ -326,7 +331,7 @@ pub mod pallet { _, Twox64Concat, BountyIndex, - Bounty<T::AccountId, BalanceOf<T, I>, BlockNumberFor<T>>, + Bounty<T::AccountId, BalanceOf<T, I>, BlockNumberFor<T, I>>, >; /// The description of each bounty. @@ -876,9 +881,9 @@ pub mod pallet { } #[pallet::hooks] - impl<T: Config<I>, I: 'static> Hooks<BlockNumberFor<T>> for Pallet<T, I> { + impl<T: Config<I>, I: 'static> Hooks<SystemBlockNumberFor<T>> for Pallet<T, I> { #[cfg(feature = "try-runtime")] - fn try_state(_n: BlockNumberFor<T>) -> Result<(), sp_runtime::TryRuntimeError> { + fn try_state(_n: SystemBlockNumberFor<T>) -> Result<(), sp_runtime::TryRuntimeError> { Self::do_try_state() } } @@ -928,7 +933,7 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> { /// Get the block number used in the treasury pallet. /// /// It may be configured to use the relay chain block number on a parachain. 
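// [Editor's illustrative sketch, not part of the patch.] The bounties change here splits
// "block number" into two clocks: the local one from `frame_system`, and the one reported
// by the treasury pallet's configured `BlockNumberProvider` (on a parachain this may be
// the relay chain). A minimal analogue of the two aliases, assuming a treasury-style
// `Config` that exposes a `BlockNumberProvider`:
//
//     use sp_runtime::traits::BlockNumberProvider;
//
//     /// The local chain's clock, unchanged.
//     type SystemBlockNumberFor<T> = frame_system::pallet_prelude::BlockNumberFor<T>;
//     /// The provider's clock; bounty periods are measured against this one.
//     type BlockNumberFor<T, I = ()> =
//         <<T as pallet_treasury::Config<I>>::BlockNumberProvider as BlockNumberProvider>::BlockNumber;
//
// Storage and the timeout constants move to the provider's type, while `Hooks` and
// `try_state` keep running on the system clock, hence the `SystemBlockNumberFor` rename.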
- pub fn treasury_block_number() -> BlockNumberFor<T> { + pub fn treasury_block_number() -> BlockNumberFor<T, I> { <T as pallet_treasury::Config<I>>::BlockNumberProvider::current_block_number() } diff --git a/substrate/frame/broker/src/benchmarking.rs b/substrate/frame/broker/src/benchmarking.rs index 9ef9b1254435..044689b254c5 100644 --- a/substrate/frame/broker/src/benchmarking.rs +++ b/substrate/frame/broker/src/benchmarking.rs @@ -30,11 +30,11 @@ use frame_support::{ }, }; use frame_system::{Pallet as System, RawOrigin}; -use sp_arithmetic::{traits::Zero, Perbill}; +use sp_arithmetic::Perbill; use sp_core::Get; use sp_runtime::{ traits::{BlockNumberProvider, MaybeConvert}, - SaturatedConversion, Saturating, + Saturating, }; const SEED: u32 = 0; @@ -287,7 +287,7 @@ mod benches { ); let region = Broker::<T>::do_purchase(caller.clone(), 10_000_000u32.into()) - .map_err(|_| BenchmarkError::Weightless)?; + .expect("Offer not high enough for configuration."); Broker::<T>::do_assign(region, None, 1001, Final) .map_err(|_| BenchmarkError::Weightless)?; @@ -316,7 +316,7 @@ mod benches { ); let region = Broker::<T>::do_purchase(caller.clone(), 10_000_000u32.into()) - .map_err(|_| BenchmarkError::Weightless)?; + .expect("Offer not high enough for configuration."); let recipient: T::AccountId = account("recipient", 0, SEED); @@ -349,7 +349,7 @@ mod benches { ); let region = Broker::<T>::do_purchase(caller.clone(), 10_000_000u32.into()) - .map_err(|_| BenchmarkError::Weightless)?; + .expect("Offer not high enough for configuration."); #[extrinsic_call] _(RawOrigin::Signed(caller), region, 2); @@ -381,7 +381,7 @@ mod benches { ); let region = Broker::<T>::do_purchase(caller.clone(), 10_000_000u32.into()) - .map_err(|_| BenchmarkError::Weightless)?; + .expect("Offer not high enough for configuration."); #[extrinsic_call] _(RawOrigin::Signed(caller), region, 0x00000_fffff_fffff_00000.into()); @@ -417,7 +417,7 @@ mod benches { ); let region = Broker::<T>::do_purchase(caller.clone(), 10_000_000u32.into()) - .map_err(|_| BenchmarkError::Weightless)?; + .expect("Offer not high enough for configuration."); #[extrinsic_call] _(RawOrigin::Signed(caller), region, 1000, Provisional); @@ -452,7 +452,7 @@ mod benches { ); let region = Broker::<T>::do_purchase(caller.clone(), 10_000_000u32.into()) - .map_err(|_| BenchmarkError::Weightless)?; + .expect("Offer not high enough for configuration."); let recipient: T::AccountId = account("recipient", 0, SEED); @@ -492,7 +492,7 @@ mod benches { ); let region = Broker::<T>::do_purchase(caller.clone(), 10_000_000u32.into()) - .map_err(|_| BenchmarkError::Weightless)?; + .expect("Offer not high enough for configuration."); let recipient: T::AccountId = account("recipient", 0, SEED); T::Currency::set_balance(&recipient.clone(), T::Currency::minimum_balance()); @@ -548,7 +548,7 @@ mod benches { T::Currency::set_balance(&Broker::<T>::account_id(), T::Currency::minimum_balance()); let region = Broker::<T>::do_purchase(caller.clone(), 10_000_000u32.into()) - .map_err(|_| BenchmarkError::Weightless)?; + .expect("Offer not high enough for configuration."); let recipient: T::AccountId = account("recipient", 0, SEED); @@ -582,7 +582,7 @@ mod benches { ); let region = Broker::<T>::do_purchase(caller.clone(), 10_000_000u32.into()) - .map_err(|_| BenchmarkError::Weightless)?; + .expect("Offer not high enough for configuration."); advance_to::<T>( (T::TimeslicePeriod::get() * (region_len * 4).into()).try_into().ok().unwrap(), @@ -616,7 +616,7 @@ mod benches { ); let region = Broker::<T>::do_purchase(caller.clone(), 10_000_000u32.into()) - .map_err(|_| 
BenchmarkError::Weightless)?; + .expect("Offer not high enough for configuration."); let recipient: T::AccountId = account("recipient", 0, SEED); @@ -786,80 +786,97 @@ mod benches { #[benchmark] fn rotate_sale(n: Linear<0, { MAX_CORE_COUNT.into() }>) -> Result<(), BenchmarkError> { - let core_count = n.try_into().unwrap(); let config = new_config_record::<T>(); + Configuration::<T>::put(config.clone()); - let now = RCBlockNumberProviderOf::<T::Coretime>::current_block_number(); - let end_price = 10_000_000u32.into(); - let commit_timeslice = Broker::<T>::latest_timeslice_ready_to_commit(&config); - let sale = SaleInfoRecordOf::<T> { - sale_start: now, - leadin_length: Zero::zero(), - end_price, - sellout_price: None, - region_begin: commit_timeslice, - region_end: commit_timeslice.saturating_add(config.region_length), - first_core: 0, - ideal_cores_sold: 0, - cores_offered: 0, - cores_sold: 0, - }; - - let status = StatusRecord { - core_count, - private_pool_size: 0, - system_pool_size: 0, - last_committed_timeslice: commit_timeslice.saturating_sub(1), - last_timeslice: Broker::<T>::current_timeslice(), - }; + // Ensure there is one buyable core then use the rest to max out reservations and leases, if + // possible for worst case. + + // First allocate up to MaxReservedCores for reservations + let n_reservations = T::MaxReservedCores::get().min(n.saturating_sub(1)); + setup_reservations::<T>(n_reservations); + // Then allocate remaining cores to leases, up to MaxLeasedCores + let n_leases = + T::MaxLeasedCores::get().min(n.saturating_sub(1).saturating_sub(n_reservations)); + setup_leases::<T>(n_leases, 1, 20); + + // Start sales so we can test the auto-renewals. + Broker::<T>::do_start_sales( + 10_000_000u32.into(), + n.saturating_sub(n_reservations) + .saturating_sub(n_leases) + .try_into() + .expect("Upper limit of n is a u16."), + ) + .expect("Configuration was initialized before; qed"); + + // Advance to the fixed price period. + advance_to::<T>(2); - // Assume Reservations to be filled for worst case + setup_reservations::<T>(T::MaxReservedCores::get()); + // Assume max auto renewals for worst case. This is between 1 and the value of + // MaxAutoRenewals. + let n_renewable = T::MaxAutoRenewals::get() + .min(n.saturating_sub(n_leases).saturating_sub(n_reservations)); - // Assume Leases to be filled for worst case - setup_leases::<T>(T::MaxLeasedCores::get(), 1, 10); + let timeslice_period: u32 = T::TimeslicePeriod::get().try_into().ok().unwrap(); + let sale = SaleInfo::<T>::get().expect("Sale has started."); - // Assume max auto renewals for worst case. 
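// [Editor's illustrative sketch, not part of the patch.] The new worst-case setup above
// carves the `n` benchmark cores up in a fixed order, always keeping at least one core
// buyable. Under that assumption the arithmetic is:
//
//     let n_reservations = max_reserved.min(n - 1);
//     let n_leases       = max_leased.min(n - 1 - n_reservations);
//     let cores_offered  = n - n_reservations - n_leases; // passed to do_start_sales
//     let n_renewable    = max_auto_renewals.min(cores_offered);
//
// e.g. with hypothetical limits max_reserved = max_leased = max_auto_renewals = 10 and
// n = 1000: 10 cores reserved, 10 leased, 980 offered for sale, 10 auto-renewed.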
- (0..T::MaxAutoRenewals::get()).try_for_each(|indx| -> Result<(), BenchmarkError> { + (0..n_renewable.into()).try_for_each(|indx| -> Result<(), BenchmarkError> { let task = 1000 + indx; let caller: T::AccountId = T::SovereignAccountOf::maybe_convert(task) .expect("Failed to get sovereign account"); T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(100u32.into()), + T::Currency::minimum_balance().saturating_add(100_000_000u32.into()), ); - let region = Broker::<T>::do_purchase(caller.clone(), 10u32.into()) - .map_err(|_| BenchmarkError::Weightless)?; + let region = Broker::<T>::do_purchase(caller.clone(), 10_000_000u32.into()) + .expect("Offer not high enough for configuration."); Broker::<T>::do_assign(region, None, task, Final) .map_err(|_| BenchmarkError::Weightless)?; - Broker::<T>::do_enable_auto_renew(caller, region.core, task, None)?; + Broker::<T>::do_enable_auto_renew(caller, region.core, task, Some(sale.region_end))?; Ok(()) })?; + // Advance to the block before the rotate_sale in which the auto-renewals will take place. + let rotate_block = timeslice_period.saturating_mul(config.region_length) - 2; + advance_to::<T>(rotate_block - 1); + + // Advance one block and manually tick so we can isolate the `rotate_sale` call. + System::<T>::set_block_number(rotate_block.into()); + RCBlockNumberProviderOf::<T::Coretime>::set_block_number(rotate_block.into()); + let mut status = Status::<T>::get().expect("Sale has started."); + let sale = SaleInfo::<T>::get().expect("Sale has started."); + Broker::<T>::process_core_count(&mut status); + Broker::<T>::process_revenue(); + status.last_committed_timeslice = config.region_length; + #[block] { Broker::<T>::rotate_sale(sale.clone(), &config, &status); } - assert!(SaleInfo::<T>::get().is_some()); - let sale_start = RCBlockNumberProviderOf::<T::Coretime>::current_block_number() + - config.interlude_length; - assert_last_event::<T>( + // Get prices from the actual price adapter. + let new_prices = T::PriceAdapter::adapt_price(SalePerformance::from_sale(&sale)); + let new_sale = SaleInfo::<T>::get().expect("Sale has started."); + let now = RCBlockNumberProviderOf::<T::Coretime>::current_block_number(); + let sale_start = config.interlude_length.saturating_add(rotate_block.into()); + + assert_has_event::<T>( Event::SaleInitialized { sale_start, leadin_length: 1u32.into(), - start_price: 1_000_000_000u32.into(), - end_price: 10_000_000u32.into(), + start_price: Broker::<T>::sale_price(&new_sale, now), + end_price: new_prices.end_price, region_begin: sale.region_begin + config.region_length, region_end: sale.region_end + config.region_length, ideal_cores_sold: 0, cores_offered: n - .saturating_sub(T::MaxReservedCores::get()) - .saturating_sub(T::MaxLeasedCores::get()) + .saturating_sub(n_reservations) + .saturating_sub(n_leases) .try_into() .unwrap(), } @@ -867,18 +884,18 @@ mod benches { ); // Make sure all cores got renewed: - (0..T::MaxAutoRenewals::get()).for_each(|indx| { + (0..n_renewable).for_each(|indx| { let task = 1000 + indx; let who = T::SovereignAccountOf::maybe_convert(task) .expect("Failed to get sovereign account"); assert_has_event::<T>( Event::Renewed { who, - old_core: 10 + indx as u16, // first ten cores are allocated to leases. 
- core: 10 + indx as u16, - price: 10u32.saturated_into(), - begin: 7, - duration: 3, + old_core: n_reservations as u16 + n_leases as u16 + indx as u16, + core: n_reservations as u16 + n_leases as u16 + indx as u16, + price: 10_000_000u32.into(), + begin: new_sale.region_begin, + duration: config.region_length, workload: Schedule::truncate_from(vec![ScheduleItem { assignment: Task(task), mask: CoreMask::complete(), @@ -1018,56 +1035,62 @@ mod benches { #[benchmark] fn enable_auto_renew() -> Result<(), BenchmarkError> { - let _core = setup_and_start_sale::<T>()?; + let _core_id = setup_and_start_sale::<T>()?; advance_to::<T>(2); + let sale = SaleInfo::<T>::get().expect("Sale has already started."); // We assume max auto renewals for worst case. (0..T::MaxAutoRenewals::get() - 1).try_for_each(|indx| -> Result<(), BenchmarkError> { let task = 1000 + indx; let caller: T::AccountId = T::SovereignAccountOf::maybe_convert(task) .expect("Failed to get sovereign account"); + // Sovereign account needs sufficient funds to purchase and renew. T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(100u32.into()), + T::Currency::minimum_balance().saturating_add(100_000_000u32.into()), ); - let region = Broker::<T>::do_purchase(caller.clone(), 10u32.into()) - .map_err(|_| BenchmarkError::Weightless)?; + let region = Broker::<T>::do_purchase(caller.clone(), 10_000_000u32.into()) + .expect("Offer not high enough for configuration."); Broker::<T>::do_assign(region, None, task, Final) .map_err(|_| BenchmarkError::Weightless)?; - Broker::<T>::do_enable_auto_renew(caller, region.core, task, Some(7))?; + Broker::<T>::do_enable_auto_renew(caller, region.core, task, Some(sale.region_end))?; Ok(()) })?; let caller: T::AccountId = T::SovereignAccountOf::maybe_convert(2001).expect("Failed to get sovereign account"); + // Sovereign account needs sufficient funds to purchase and renew. T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(100u32.into()), + T::Currency::minimum_balance().saturating_add(100_000_000u32.into()), ); // The region for which we benchmark enable auto renew. - let region = Broker::<T>::do_purchase(caller.clone(), 10u32.into()) - .map_err(|_| BenchmarkError::Weightless)?; + let region = Broker::<T>::do_purchase(caller.clone(), 10_000_000u32.into()) + .expect("Offer not high enough for configuration."); Broker::<T>::do_assign(region, None, 2001, Final) .map_err(|_| BenchmarkError::Weightless)?; // The most 'intensive' path is when we renew the core upon enabling auto-renewal. 
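// [Editor's illustrative sketch, not part of the patch.] Renewal-on-enable can only
// happen once the purchased region is actually renewable, i.e. from the start of the
// next bulk sale, so the updated benchmark derives that block from the runtime
// configuration instead of the old hard-coded `advance_to::<T>(6)`:
//
//     let timeslice_period: u32 = T::TimeslicePeriod::get().try_into().ok().unwrap();
//     let region_length = Configuration::<T>::get().expect("Already configured.").region_length;
//     advance_to::<T>(region_length * timeslice_period); // first block of the next sale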
// Therefore, we advance to next bulk sale: - advance_to::<T>(6); + let timeslice_period: u32 = T::TimeslicePeriod::get().try_into().ok().unwrap(); + let config = Configuration::<T>::get().expect("Already configured."); + advance_to::<T>(config.region_length * timeslice_period); #[extrinsic_call] _(RawOrigin::Signed(caller), region.core, 2001, None); assert_last_event::<T>(Event::AutoRenewalEnabled { core: region.core, task: 2001 }.into()); // Make sure we indeed renewed: + let sale = SaleInfo::<T>::get().expect("Sales have started."); assert!(PotentialRenewals::<T>::get(PotentialRenewalId { core: region.core, - when: 10 // region end after renewal + when: sale.region_end, }) .is_some()); @@ -1076,37 +1099,41 @@ mod benches { #[benchmark] fn disable_auto_renew() -> Result<(), BenchmarkError> { - let _core = setup_and_start_sale::<T>()?; + let core_id = setup_and_start_sale::<T>()?; advance_to::<T>(2); + let sale = SaleInfo::<T>::get().expect("Sale has already started."); // We assume max auto renewals for worst case. - (0..T::MaxAutoRenewals::get() - 1).try_for_each(|indx| -> Result<(), BenchmarkError> { + (0..T::MaxAutoRenewals::get()).try_for_each(|indx| -> Result<(), BenchmarkError> { let task = 1000 + indx; let caller: T::AccountId = T::SovereignAccountOf::maybe_convert(task) .expect("Failed to get sovereign account"); T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(100u32.into()), + T::Currency::minimum_balance().saturating_add(10_000_000u32.into()), ); - let region = Broker::<T>::do_purchase(caller.clone(), 10u32.into()) - .map_err(|_| BenchmarkError::Weightless)?; + let region = Broker::<T>::do_purchase(caller.clone(), 10_000_000u32.into()) + .expect("Offer not high enough for configuration."); Broker::<T>::do_assign(region, None, task, Final) .map_err(|_| BenchmarkError::Weightless)?; - Broker::<T>::do_enable_auto_renew(caller, region.core, task, Some(7))?; + Broker::<T>::do_enable_auto_renew(caller, region.core, task, Some(sale.region_end))?; Ok(()) })?; + let task = 1000; + let caller: T::AccountId = - T::SovereignAccountOf::maybe_convert(1000).expect("Failed to get sovereign account"); + T::SovereignAccountOf::maybe_convert(task).expect("Failed to get sovereign account"); + #[extrinsic_call] - _(RawOrigin::Signed(caller), _core, 1000); + _(RawOrigin::Signed(caller), core_id, task); - assert_last_event::<T>(Event::AutoRenewalDisabled { core: _core, task: 1000 }.into()); + assert_last_event::<T>(Event::AutoRenewalDisabled { core: core_id, task }.into()); Ok(()) } @@ -1120,11 +1147,11 @@ mod benches { let caller: T::AccountId = whitelisted_caller(); T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(u32::MAX.into()), + T::Currency::minimum_balance().saturating_add(10_000_000u32.into()), ); - let _region = Broker::<T>::do_purchase(caller.clone(), (u32::MAX / 2).into()) - .map_err(|_| BenchmarkError::Weightless)?; + let _region = Broker::<T>::do_purchase(caller.clone(), 10_000_000u32.into()) + .expect("Offer not high enough for configuration."); let timeslice = Broker::<T>::current_timeslice(); diff --git a/substrate/frame/broker/src/weights.rs b/substrate/frame/broker/src/weights.rs index 2f25fddc2050..894fed5a6a00 100644 --- a/substrate/frame/broker/src/weights.rs +++ b/substrate/frame/broker/src/weights.rs @@ -18,27 +18,25 @@ //! Autogenerated weights for `pallet_broker` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
DATE: 2024-12-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `sergej-B650-AORUS-ELITE-AX`, CPU: `AMD Ryzen 9 7900X3D 12-Core Processor` +//! HOSTNAME: `runner-acd6uxux-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/release/substrate-node +// target/production/substrate-node // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_broker -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --output=./substrate/frame/broker/src/weights.rs +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_broker +// --chain=dev // --header=./substrate/HEADER-APACHE2 +// --output=./substrate/frame/broker/src/weights.rs // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -80,9 +78,9 @@ pub trait WeightInfo { fn notify_revenue() -> Weight; fn do_tick_base() -> Weight; fn swap_leases() -> Weight; - fn on_new_timeslice() -> Weight; fn enable_auto_renew() -> Weight; fn disable_auto_renew() -> Weight; + fn on_new_timeslice() -> Weight; } /// Weights for `pallet_broker` using the Substrate node and recommended hardware. @@ -94,8 +92,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_593_000 picoseconds. - Weight::from_parts(1_703_000, 0) + // Minimum execution time: 2_498_000 picoseconds. + Weight::from_parts(2_660_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::Reservations` (r:1 w:1) @@ -104,8 +102,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `5016` // Estimated: `7496` - // Minimum execution time: 12_864_000 picoseconds. - Weight::from_parts(13_174_000, 7496) + // Minimum execution time: 23_090_000 picoseconds. + Weight::from_parts(23_664_000, 7496) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -115,8 +113,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `6218` // Estimated: `7496` - // Minimum execution time: 12_284_000 picoseconds. - Weight::from_parts(13_566_000, 7496) + // Minimum execution time: 21_782_000 picoseconds. + Weight::from_parts(22_708_000, 7496) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -126,8 +124,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 6_743_000 picoseconds. - Weight::from_parts(7_094_000, 1526) + // Minimum execution time: 14_966_000 picoseconds. + Weight::from_parts(15_592_000, 1526) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -152,10 +150,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `6330` // Estimated: `8499` - // Minimum execution time: 21_120_000 picoseconds. - Weight::from_parts(40_929_422, 8499) - // Standard Error: 471 - .saturating_add(Weight::from_parts(1_004, 0).saturating_mul(n.into())) + // Minimum execution time: 31_757_000 picoseconds. 
+ Weight::from_parts(57_977_268, 8499) + // Standard Error: 576 + .saturating_add(Weight::from_parts(3_102, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(16_u64)) } @@ -163,19 +161,15 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::SaleInfo` (r:1 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) - /// Storage: `Authorship::Author` (r:1 w:0) - /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `System::Digest` (r:1 w:0) - /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::Regions` (r:0 w:1) /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn purchase() -> Weight { // Proof Size summary in bytes: - // Measured: `651` - // Estimated: `2136` - // Minimum execution time: 31_169_000 picoseconds. - Weight::from_parts(32_271_000, 2136) - .saturating_add(T::DbWeight::get().reads(4_u64)) + // Measured: `470` + // Estimated: `1542` + // Minimum execution time: 40_469_000 picoseconds. + Weight::from_parts(41_360_000, 1542) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Broker::Configuration` (r:1 w:0) @@ -186,19 +180,15 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) /// Storage: `Broker::PotentialRenewals` (r:1 w:2) /// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) - /// Storage: `Authorship::Author` (r:1 w:0) - /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `System::Digest` (r:1 w:0) - /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::Workplan` (r:0 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn renew() -> Weight { // Proof Size summary in bytes: - // Measured: `769` + // Measured: `588` // Estimated: `4698` - // Minimum execution time: 44_945_000 picoseconds. - Weight::from_parts(47_119_000, 4698) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Minimum execution time: 60_724_000 picoseconds. + Weight::from_parts(63_445_000, 4698) + .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } /// Storage: `Broker::Regions` (r:1 w:1) @@ -207,8 +197,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `496` // Estimated: `3551` - // Minimum execution time: 11_562_000 picoseconds. - Weight::from_parts(11_943_000, 3551) + // Minimum execution time: 23_734_000 picoseconds. + Weight::from_parts(25_080_000, 3551) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -218,8 +208,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `496` // Estimated: `3551` - // Minimum execution time: 13_075_000 picoseconds. - Weight::from_parts(13_616_000, 3551) + // Minimum execution time: 25_917_000 picoseconds. 
+ Weight::from_parts(26_715_000, 3551) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -229,8 +219,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `496` // Estimated: `3551` - // Minimum execution time: 13_695_000 picoseconds. - Weight::from_parts(14_658_000, 3551) + // Minimum execution time: 26_764_000 picoseconds. + Weight::from_parts(27_770_000, 3551) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -246,8 +236,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `741` // Estimated: `4681` - // Minimum execution time: 22_623_000 picoseconds. - Weight::from_parts(23_233_000, 4681) + // Minimum execution time: 37_617_000 picoseconds. + Weight::from_parts(39_333_000, 4681) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -265,8 +255,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `776` // Estimated: `5996` - // Minimum execution time: 26_901_000 picoseconds. - Weight::from_parts(27_472_000, 5996) + // Minimum execution time: 43_168_000 picoseconds. + Weight::from_parts(44_741_000, 5996) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -281,10 +271,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `878` // Estimated: `6196 + m * (2520 ±0)` - // Minimum execution time: 51_778_000 picoseconds. - Weight::from_parts(53_726_731, 6196) - // Standard Error: 45_279 - .saturating_add(Weight::from_parts(677_769, 0).saturating_mul(m.into())) + // Minimum execution time: 75_317_000 picoseconds. + Weight::from_parts(76_792_860, 6196) + // Standard Error: 55_267 + .saturating_add(Weight::from_parts(1_878_133, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(T::DbWeight::get().writes(5_u64)) @@ -296,8 +286,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3593` - // Minimum execution time: 31_790_000 picoseconds. - Weight::from_parts(32_601_000, 3593) + // Minimum execution time: 44_248_000 picoseconds. + Weight::from_parts(45_201_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -309,8 +299,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `604` // Estimated: `3551` - // Minimum execution time: 18_465_000 picoseconds. - Weight::from_parts(21_050_000, 3551) + // Minimum execution time: 39_853_000 picoseconds. + Weight::from_parts(44_136_000, 3551) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -324,8 +314,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `601` // Estimated: `3533` - // Minimum execution time: 23_825_000 picoseconds. - Weight::from_parts(26_250_000, 3533) + // Minimum execution time: 46_452_000 picoseconds. 
+ Weight::from_parts(52_780_000, 3533) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -339,10 +329,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn drop_history() -> Weight { // Proof Size summary in bytes: - // Measured: `1014` + // Measured: `1117` // Estimated: `3593` - // Minimum execution time: 28_103_000 picoseconds. - Weight::from_parts(32_622_000, 3593) + // Minimum execution time: 64_905_000 picoseconds. + Weight::from_parts(72_914_000, 3593) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -354,8 +344,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `661` // Estimated: `4698` - // Minimum execution time: 16_751_000 picoseconds. - Weight::from_parts(17_373_000, 4698) + // Minimum execution time: 38_831_000 picoseconds. + Weight::from_parts(41_420_000, 4698) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -364,8 +354,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_705_000 picoseconds. - Weight::from_parts(2_991_768, 0) + // Minimum execution time: 4_595_000 picoseconds. + Weight::from_parts(4_964_606, 0) } /// Storage: `Broker::CoreCountInbox` (r:1 w:1) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) @@ -374,37 +364,58 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `404` // Estimated: `1487` - // Minimum execution time: 4_598_000 picoseconds. - Weight::from_parts(4_937_302, 1487) + // Minimum execution time: 8_640_000 picoseconds. + Weight::from_parts(9_153_332, 1487) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + /// Storage: `Broker::RevenueInbox` (r:1 w:1) + /// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`) /// Storage: `Broker::InstaPoolHistory` (r:1 w:1) /// Proof: `Broker::InstaPoolHistory` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Authorship::Author` (r:1 w:0) - /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `System::Digest` (r:1 w:0) - /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn process_revenue() -> Weight { // Proof Size summary in bytes: - // Measured: `991` - // Estimated: `4456` - // Minimum execution time: 37_601_000 picoseconds. - Weight::from_parts(38_262_000, 4456) - .saturating_add(T::DbWeight::get().reads(5_u64)) + // Measured: `667` + // Estimated: `3593` + // Minimum execution time: 40_570_000 picoseconds. 
+ Weight::from_parts(41_402_000, 3593) + .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } + /// Storage: `Broker::InstaPoolIo` (r:3 w:3) + /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Broker::Reservations` (r:1 w:0) + /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(6011), added: 6506, mode: `MaxEncodedLen`) + /// Storage: `Broker::Leases` (r:1 w:1) + /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(41), added: 536, mode: `MaxEncodedLen`) + /// Storage: `Broker::AutoRenewals` (r:1 w:1) + /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(101), added: 596, mode: `MaxEncodedLen`) + /// Storage: `Broker::Configuration` (r:1 w:0) + /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) + /// Storage: `Broker::Status` (r:1 w:0) + /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) + /// Storage: `Broker::PotentialRenewals` (r:10 w:20) + /// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:10 w:10) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Broker::SaleInfo` (r:0 w:1) + /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) + /// Storage: `Broker::Workplan` (r:0 w:1000) + /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn rotate_sale(_n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) + fn rotate_sale(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `8548` + // Estimated: `38070` + // Minimum execution time: 29_370_000 picoseconds. + Weight::from_parts(334_030_189, 38070) + // Standard Error: 6_912 + .saturating_add(Weight::from_parts(1_268_750, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(26_u64)) + .saturating_add(T::DbWeight::get().writes(34_u64)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) } /// Storage: `Broker::InstaPoolIo` (r:1 w:0) /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) @@ -414,8 +425,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3493` - // Minimum execution time: 5_391_000 picoseconds. - Weight::from_parts(5_630_000, 3493) + // Minimum execution time: 9_005_000 picoseconds. + Weight::from_parts(9_392_000, 3493) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -427,8 +438,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1423` // Estimated: `4681` - // Minimum execution time: 10_249_000 picoseconds. - Weight::from_parts(10_529_000, 4681) + // Minimum execution time: 19_043_000 picoseconds. 
+ Weight::from_parts(20_089_000, 4681) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -436,8 +447,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 120_000 picoseconds. - Weight::from_parts(140_000, 0) + // Minimum execution time: 149_000 picoseconds. + Weight::from_parts(183_000, 0) } /// Storage: `Broker::CoreCountInbox` (r:0 w:1) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) @@ -445,8 +456,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_402_000 picoseconds. - Weight::from_parts(1_513_000, 0) + // Minimum execution time: 2_248_000 picoseconds. + Weight::from_parts(2_425_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::RevenueInbox` (r:0 w:1) @@ -455,8 +466,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_902_000 picoseconds. - Weight::from_parts(2_116_000, 0) + // Minimum execution time: 2_413_000 picoseconds. + Weight::from_parts(2_640_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::Status` (r:1 w:1) @@ -465,16 +476,16 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) /// Storage: `Broker::CoreCountInbox` (r:1 w:0) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + /// Storage: `Broker::RevenueInbox` (r:1 w:0) + /// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`) fn do_tick_base() -> Weight { // Proof Size summary in bytes: - // Measured: `603` - // Estimated: `4068` - // Minimum execution time: 8_897_000 picoseconds. - Weight::from_parts(9_218_000, 4068) + // Measured: `441` + // Estimated: `1516` + // Minimum execution time: 17_083_000 picoseconds. + Weight::from_parts(18_077_000, 1516) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::Leases` (r:1 w:1) /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(41), added: 536, mode: `MaxEncodedLen`) @@ -482,18 +493,11 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 4_678_000 picoseconds. - Weight::from_parts(4_920_000, 1526) + // Minimum execution time: 11_620_000 picoseconds. + Weight::from_parts(12_063_000, 1526) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - fn on_new_timeslice() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 229_000 picoseconds. 
- Weight::from_parts(268_000, 0) - } /// Storage: `Broker::SaleInfo` (r:1 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) /// Storage: `Broker::PotentialRenewals` (r:1 w:2) @@ -504,34 +508,37 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Authorship::Author` (r:1 w:0) - /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `System::Digest` (r:1 w:0) - /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::AutoRenewals` (r:1 w:1) /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(101), added: 596, mode: `MaxEncodedLen`) /// Storage: `Broker::Workplan` (r:0 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn enable_auto_renew() -> Weight { // Proof Size summary in bytes: - // Measured: `930` + // Measured: `1121` // Estimated: `4698` - // Minimum execution time: 51_597_000 picoseconds. - Weight::from_parts(52_609_000, 4698) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Minimum execution time: 85_270_000 picoseconds. + Weight::from_parts(90_457_000, 4698) + .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } /// Storage: `Broker::AutoRenewals` (r:1 w:1) /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(101), added: 596, mode: `MaxEncodedLen`) fn disable_auto_renew() -> Weight { // Proof Size summary in bytes: - // Measured: `484` + // Measured: `578` // Estimated: `1586` - // Minimum execution time: 8_907_000 picoseconds. - Weight::from_parts(9_167_000, 1586) + // Minimum execution time: 22_479_000 picoseconds. + Weight::from_parts(23_687_000, 1586) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } + fn on_new_timeslice() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 245_000 picoseconds. + Weight::from_parts(290_000, 0) + } } // For backwards compatibility and tests. @@ -542,8 +549,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_593_000 picoseconds. - Weight::from_parts(1_703_000, 0) + // Minimum execution time: 2_498_000 picoseconds. + Weight::from_parts(2_660_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Broker::Reservations` (r:1 w:1) @@ -552,8 +559,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `5016` // Estimated: `7496` - // Minimum execution time: 12_864_000 picoseconds. - Weight::from_parts(13_174_000, 7496) + // Minimum execution time: 23_090_000 picoseconds. + Weight::from_parts(23_664_000, 7496) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -563,8 +570,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `6218` // Estimated: `7496` - // Minimum execution time: 12_284_000 picoseconds. - Weight::from_parts(13_566_000, 7496) + // Minimum execution time: 21_782_000 picoseconds. 
+ Weight::from_parts(22_708_000, 7496) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -574,8 +581,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 6_743_000 picoseconds. - Weight::from_parts(7_094_000, 1526) + // Minimum execution time: 14_966_000 picoseconds. + Weight::from_parts(15_592_000, 1526) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -600,10 +607,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `6330` // Estimated: `8499` - // Minimum execution time: 21_120_000 picoseconds. - Weight::from_parts(40_929_422, 8499) - // Standard Error: 471 - .saturating_add(Weight::from_parts(1_004, 0).saturating_mul(n.into())) + // Minimum execution time: 31_757_000 picoseconds. + Weight::from_parts(57_977_268, 8499) + // Standard Error: 576 + .saturating_add(Weight::from_parts(3_102, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(16_u64)) } @@ -611,19 +618,15 @@ impl WeightInfo for () { /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::SaleInfo` (r:1 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) - /// Storage: `Authorship::Author` (r:1 w:0) - /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `System::Digest` (r:1 w:0) - /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::Regions` (r:0 w:1) /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn purchase() -> Weight { // Proof Size summary in bytes: - // Measured: `651` - // Estimated: `2136` - // Minimum execution time: 31_169_000 picoseconds. - Weight::from_parts(32_271_000, 2136) - .saturating_add(RocksDbWeight::get().reads(4_u64)) + // Measured: `470` + // Estimated: `1542` + // Minimum execution time: 40_469_000 picoseconds. + Weight::from_parts(41_360_000, 1542) + .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Broker::Configuration` (r:1 w:0) @@ -634,19 +637,15 @@ impl WeightInfo for () { /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) /// Storage: `Broker::PotentialRenewals` (r:1 w:2) /// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) - /// Storage: `Authorship::Author` (r:1 w:0) - /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `System::Digest` (r:1 w:0) - /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::Workplan` (r:0 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn renew() -> Weight { // Proof Size summary in bytes: - // Measured: `769` + // Measured: `588` // Estimated: `4698` - // Minimum execution time: 44_945_000 picoseconds. - Weight::from_parts(47_119_000, 4698) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Minimum execution time: 60_724_000 picoseconds. 
+ Weight::from_parts(63_445_000, 4698) + .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } /// Storage: `Broker::Regions` (r:1 w:1) @@ -655,8 +654,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `496` // Estimated: `3551` - // Minimum execution time: 11_562_000 picoseconds. - Weight::from_parts(11_943_000, 3551) + // Minimum execution time: 23_734_000 picoseconds. + Weight::from_parts(25_080_000, 3551) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -666,8 +665,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `496` // Estimated: `3551` - // Minimum execution time: 13_075_000 picoseconds. - Weight::from_parts(13_616_000, 3551) + // Minimum execution time: 25_917_000 picoseconds. + Weight::from_parts(26_715_000, 3551) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -677,8 +676,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `496` // Estimated: `3551` - // Minimum execution time: 13_695_000 picoseconds. - Weight::from_parts(14_658_000, 3551) + // Minimum execution time: 26_764_000 picoseconds. + Weight::from_parts(27_770_000, 3551) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -694,8 +693,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `741` // Estimated: `4681` - // Minimum execution time: 22_623_000 picoseconds. - Weight::from_parts(23_233_000, 4681) + // Minimum execution time: 37_617_000 picoseconds. + Weight::from_parts(39_333_000, 4681) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -713,8 +712,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `776` // Estimated: `5996` - // Minimum execution time: 26_901_000 picoseconds. - Weight::from_parts(27_472_000, 5996) + // Minimum execution time: 43_168_000 picoseconds. + Weight::from_parts(44_741_000, 5996) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -729,10 +728,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `878` // Estimated: `6196 + m * (2520 ±0)` - // Minimum execution time: 51_778_000 picoseconds. - Weight::from_parts(53_726_731, 6196) - // Standard Error: 45_279 - .saturating_add(Weight::from_parts(677_769, 0).saturating_mul(m.into())) + // Minimum execution time: 75_317_000 picoseconds. + Weight::from_parts(76_792_860, 6196) + // Standard Error: 55_267 + .saturating_add(Weight::from_parts(1_878_133, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(RocksDbWeight::get().writes(5_u64)) @@ -744,8 +743,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3593` - // Minimum execution time: 31_790_000 picoseconds. - Weight::from_parts(32_601_000, 3593) + // Minimum execution time: 44_248_000 picoseconds. + Weight::from_parts(45_201_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -757,8 +756,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `604` // Estimated: `3551` - // Minimum execution time: 18_465_000 picoseconds. 
- Weight::from_parts(21_050_000, 3551) + // Minimum execution time: 39_853_000 picoseconds. + Weight::from_parts(44_136_000, 3551) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -772,8 +771,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `601` // Estimated: `3533` - // Minimum execution time: 23_825_000 picoseconds. - Weight::from_parts(26_250_000, 3533) + // Minimum execution time: 46_452_000 picoseconds. + Weight::from_parts(52_780_000, 3533) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -787,10 +786,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn drop_history() -> Weight { // Proof Size summary in bytes: - // Measured: `1014` + // Measured: `1117` // Estimated: `3593` - // Minimum execution time: 28_103_000 picoseconds. - Weight::from_parts(32_622_000, 3593) + // Minimum execution time: 64_905_000 picoseconds. + Weight::from_parts(72_914_000, 3593) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -802,8 +801,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `661` // Estimated: `4698` - // Minimum execution time: 16_751_000 picoseconds. - Weight::from_parts(17_373_000, 4698) + // Minimum execution time: 38_831_000 picoseconds. + Weight::from_parts(41_420_000, 4698) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -812,8 +811,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_705_000 picoseconds. - Weight::from_parts(2_991_768, 0) + // Minimum execution time: 4_595_000 picoseconds. + Weight::from_parts(4_964_606, 0) } /// Storage: `Broker::CoreCountInbox` (r:1 w:1) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) @@ -822,37 +821,58 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `404` // Estimated: `1487` - // Minimum execution time: 4_598_000 picoseconds. - Weight::from_parts(4_937_302, 1487) + // Minimum execution time: 8_640_000 picoseconds. 
+ Weight::from_parts(9_153_332, 1487) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + /// Storage: `Broker::RevenueInbox` (r:1 w:1) + /// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`) /// Storage: `Broker::InstaPoolHistory` (r:1 w:1) /// Proof: `Broker::InstaPoolHistory` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Authorship::Author` (r:1 w:0) - /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `System::Digest` (r:1 w:0) - /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn process_revenue() -> Weight { // Proof Size summary in bytes: - // Measured: `991` - // Estimated: `4456` - // Minimum execution time: 37_601_000 picoseconds. - Weight::from_parts(38_262_000, 4456) - .saturating_add(RocksDbWeight::get().reads(5_u64)) + // Measured: `667` + // Estimated: `3593` + // Minimum execution time: 40_570_000 picoseconds. + Weight::from_parts(41_402_000, 3593) + .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } + /// Storage: `Broker::InstaPoolIo` (r:3 w:3) + /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Broker::Reservations` (r:1 w:0) + /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(6011), added: 6506, mode: `MaxEncodedLen`) + /// Storage: `Broker::Leases` (r:1 w:1) + /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(41), added: 536, mode: `MaxEncodedLen`) + /// Storage: `Broker::AutoRenewals` (r:1 w:1) + /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(101), added: 596, mode: `MaxEncodedLen`) + /// Storage: `Broker::Configuration` (r:1 w:0) + /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) + /// Storage: `Broker::Status` (r:1 w:0) + /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) + /// Storage: `Broker::PotentialRenewals` (r:10 w:20) + /// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:10 w:10) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Broker::SaleInfo` (r:0 w:1) + /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) + /// Storage: `Broker::Workplan` (r:0 w:1000) + /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn rotate_sale(_n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. 
- Weight::from_parts(0, 0) + fn rotate_sale(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `8548` + // Estimated: `38070` + // Minimum execution time: 29_370_000 picoseconds. + Weight::from_parts(334_030_189, 38070) + // Standard Error: 6_912 + .saturating_add(Weight::from_parts(1_268_750, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(26_u64)) + .saturating_add(RocksDbWeight::get().writes(34_u64)) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) } /// Storage: `Broker::InstaPoolIo` (r:1 w:0) /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) @@ -862,8 +882,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3493` - // Minimum execution time: 5_391_000 picoseconds. - Weight::from_parts(5_630_000, 3493) + // Minimum execution time: 9_005_000 picoseconds. + Weight::from_parts(9_392_000, 3493) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -875,8 +895,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1423` // Estimated: `4681` - // Minimum execution time: 10_249_000 picoseconds. - Weight::from_parts(10_529_000, 4681) + // Minimum execution time: 19_043_000 picoseconds. + Weight::from_parts(20_089_000, 4681) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -884,8 +904,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 120_000 picoseconds. - Weight::from_parts(140_000, 0) + // Minimum execution time: 149_000 picoseconds. + Weight::from_parts(183_000, 0) } /// Storage: `Broker::CoreCountInbox` (r:0 w:1) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) @@ -893,8 +913,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_402_000 picoseconds. - Weight::from_parts(1_513_000, 0) + // Minimum execution time: 2_248_000 picoseconds. + Weight::from_parts(2_425_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Broker::RevenueInbox` (r:0 w:1) @@ -903,8 +923,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_902_000 picoseconds. - Weight::from_parts(2_116_000, 0) + // Minimum execution time: 2_413_000 picoseconds. + Weight::from_parts(2_640_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Broker::Status` (r:1 w:1) @@ -913,16 +933,16 @@ impl WeightInfo for () { /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) /// Storage: `Broker::CoreCountInbox` (r:1 w:0) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + /// Storage: `Broker::RevenueInbox` (r:1 w:0) + /// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`) fn do_tick_base() -> Weight { // Proof Size summary in bytes: - // Measured: `603` - // Estimated: `4068` - // Minimum execution time: 8_897_000 picoseconds. 
- Weight::from_parts(9_218_000, 4068) + // Measured: `441` + // Estimated: `1516` + // Minimum execution time: 17_083_000 picoseconds. + Weight::from_parts(18_077_000, 1516) .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Broker::Leases` (r:1 w:1) /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(41), added: 536, mode: `MaxEncodedLen`) @@ -930,18 +950,11 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 4_678_000 picoseconds. - Weight::from_parts(4_920_000, 1526) + // Minimum execution time: 11_620_000 picoseconds. + Weight::from_parts(12_063_000, 1526) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - fn on_new_timeslice() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 229_000 picoseconds. - Weight::from_parts(268_000, 0) - } /// Storage: `Broker::SaleInfo` (r:1 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) /// Storage: `Broker::PotentialRenewals` (r:1 w:2) @@ -952,32 +965,35 @@ impl WeightInfo for () { /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Authorship::Author` (r:1 w:0) - /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `System::Digest` (r:1 w:0) - /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::AutoRenewals` (r:1 w:1) /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(101), added: 596, mode: `MaxEncodedLen`) /// Storage: `Broker::Workplan` (r:0 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn enable_auto_renew() -> Weight { // Proof Size summary in bytes: - // Measured: `930` + // Measured: `1121` // Estimated: `4698` - // Minimum execution time: 51_597_000 picoseconds. - Weight::from_parts(52_609_000, 4698) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Minimum execution time: 85_270_000 picoseconds. + Weight::from_parts(90_457_000, 4698) + .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } /// Storage: `Broker::AutoRenewals` (r:1 w:1) /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(101), added: 596, mode: `MaxEncodedLen`) fn disable_auto_renew() -> Weight { // Proof Size summary in bytes: - // Measured: `484` + // Measured: `578` // Estimated: `1586` - // Minimum execution time: 8_907_000 picoseconds. - Weight::from_parts(9_167_000, 1586) + // Minimum execution time: 22_479_000 picoseconds. + Weight::from_parts(23_687_000, 1586) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } -} \ No newline at end of file + fn on_new_timeslice() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 245_000 picoseconds. 
+		Weight::from_parts(290_000, 0)
+	}
+}
diff --git a/substrate/frame/child-bounties/src/benchmarking.rs b/substrate/frame/child-bounties/src/benchmarking.rs
index 4b2d62cd920e..2864f3ab5048 100644
--- a/substrate/frame/child-bounties/src/benchmarking.rs
+++ b/substrate/frame/child-bounties/src/benchmarking.rs
@@ -22,7 +22,7 @@ use alloc::vec;
 use frame_benchmarking::{v2::*, BenchmarkError};
 use frame_support::ensure;
-use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin};
+use frame_system::RawOrigin;
 use pallet_bounties::Pallet as Bounties;
 use pallet_treasury::Pallet as Treasury;
 use sp_runtime::traits::BlockNumberProvider;
diff --git a/substrate/frame/child-bounties/src/lib.rs b/substrate/frame/child-bounties/src/lib.rs
index ea1d9547d465..9fca26510989 100644
--- a/substrate/frame/child-bounties/src/lib.rs
+++ b/substrate/frame/child-bounties/src/lib.rs
@@ -79,7 +79,9 @@ use sp_runtime::{
 };
 
 use frame_support::pallet_prelude::*;
-use frame_system::pallet_prelude::*;
+use frame_system::pallet_prelude::{
+	ensure_signed, BlockNumberFor as SystemBlockNumberFor, OriginFor,
+};
 use pallet_bounties::BountyStatus;
 use scale_info::TypeInfo;
 pub use weights::WeightInfo;
@@ -90,6 +92,8 @@ type BalanceOf<T> = pallet_treasury::BalanceOf<T>;
 type BountiesError<T> = pallet_bounties::Error<T>;
 type BountyIndex = pallet_bounties::BountyIndex;
 type AccountIdLookupOf<T> = <<T as frame_system::Config>::Lookup as StaticLookup>::Source;
+type BlockNumberFor<T> =
+	<<T as pallet_treasury::Config>::BlockNumberProvider as BlockNumberProvider>::BlockNumber;
 
 /// A child bounty proposal.
 #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
@@ -810,7 +814,7 @@ pub mod pallet {
 	}
 
 	#[pallet::hooks]
-	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
+	impl<T: Config> Hooks<SystemBlockNumberFor<T>> for Pallet<T> {
 		fn integrity_test() {
 			let parent_bounty_id: BountyIndex = 1;
 			let child_bounty_id: BountyIndex = 2;
diff --git a/substrate/frame/contracts/Cargo.toml b/substrate/frame/contracts/Cargo.toml
index 316ea6813048..96351752918a 100644
--- a/substrate/frame/contracts/Cargo.toml
+++ b/substrate/frame/contracts/Cargo.toml
@@ -119,6 +119,7 @@ runtime-benchmarks = [
 	"sp-runtime/runtime-benchmarks",
 	"wasm-instrument",
 	"xcm-builder/runtime-benchmarks",
+	"xcm/runtime-benchmarks",
 ]
 try-runtime = [
 	"frame-support/try-runtime",
diff --git a/substrate/frame/contracts/mock-network/Cargo.toml b/substrate/frame/contracts/mock-network/Cargo.toml
index d6e2d51ef452..66137bc8a0c6 100644
--- a/substrate/frame/contracts/mock-network/Cargo.toml
+++ b/substrate/frame/contracts/mock-network/Cargo.toml
@@ -87,4 +87,5 @@ runtime-benchmarks = [
 	"sp-runtime/runtime-benchmarks",
 	"xcm-builder/runtime-benchmarks",
 	"xcm-executor/runtime-benchmarks",
+	"xcm/runtime-benchmarks",
 ]
diff --git a/substrate/frame/contracts/src/tests.rs b/substrate/frame/contracts/src/tests.rs
index c3b6e3273f34..b01d0aa4fa48 100644
--- a/substrate/frame/contracts/src/tests.rs
+++ b/substrate/frame/contracts/src/tests.rs
@@ -399,6 +399,7 @@ impl pallet_proxy::Config for Test {
 	type CallHasher = BlakeTwo256;
 	type AnnouncementDepositBase = ConstU64<1>;
 	type AnnouncementDepositFactor = ConstU64<1>;
+	type BlockNumberProvider = frame_system::Pallet<Test>;
 }
 
 impl pallet_dummy::Config for Test {}
diff --git a/substrate/frame/conviction-voting/src/lib.rs b/substrate/frame/conviction-voting/src/lib.rs
index 85da1aed3c27..31bd6b85ec86 100644
--- a/substrate/frame/conviction-voting/src/lib.rs
+++ b/substrate/frame/conviction-voting/src/lib.rs
@@ -171,10 +171,12 @@ pub mod pallet {
 		Delegated(T::AccountId, T::AccountId),
 		/// An \[account\] has cancelled a previous delegation operation.
 		Undelegated(T::AccountId),
-		/// An account that has voted
+		/// An account has voted
 		Voted { who: T::AccountId, vote: AccountVote<BalanceOf<T, I>> },
-		/// A vote that been removed
+		/// A vote has been removed
 		VoteRemoved { who: T::AccountId, vote: AccountVote<BalanceOf<T, I>> },
+		/// The lockup period of a conviction vote expired, and the funds have been unlocked.
+		VoteUnlocked { who: T::AccountId, class: ClassOf<T, I> },
 	}
 
 	#[pallet::error]
@@ -315,6 +317,7 @@ pub mod pallet {
 			ensure_signed(origin)?;
 			let target = T::Lookup::lookup(target)?;
 			Self::update_lock(&class, &target);
+			Self::deposit_event(Event::VoteUnlocked { who: target, class });
 			Ok(())
 		}
 
diff --git a/substrate/frame/conviction-voting/src/tests.rs b/substrate/frame/conviction-voting/src/tests.rs
index 37cdd7a5b338..dd9ee33ee183 100644
--- a/substrate/frame/conviction-voting/src/tests.rs
+++ b/substrate/frame/conviction-voting/src/tests.rs
@@ -238,27 +238,52 @@ fn basic_stuff() {
 fn basic_voting_works() {
 	new_test_ext().execute_with(|| {
 		assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, aye(2, 5)));
+		System::assert_last_event(tests::RuntimeEvent::Voting(Event::Voted {
+			who: 1,
+			vote: aye(2, 5),
+		}));
 		assert_eq!(tally(3), Tally::from_parts(10, 0, 2));
 		assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, nay(2, 5)));
+		System::assert_last_event(tests::RuntimeEvent::Voting(Event::Voted {
+			who: 1,
+			vote: nay(2, 5),
+		}));
 		assert_eq!(tally(3), Tally::from_parts(0, 10, 0));
 		assert_eq!(Balances::usable_balance(1), 8);
 
 		assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, aye(5, 1)));
+		System::assert_last_event(tests::RuntimeEvent::Voting(Event::Voted {
+			who: 1,
+			vote: aye(5, 1),
+		}));
 		assert_eq!(tally(3), Tally::from_parts(5, 0, 5));
 		assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, nay(5, 1)));
 		assert_eq!(tally(3), Tally::from_parts(0, 5, 0));
 		assert_eq!(Balances::usable_balance(1), 5);
 
 		assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, aye(10, 0)));
+		System::assert_last_event(tests::RuntimeEvent::Voting(Event::Voted {
+			who: 1,
+			vote: aye(10, 0),
+		}));
 		assert_eq!(tally(3), Tally::from_parts(1, 0, 10));
+
 		assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, nay(10, 0)));
 		assert_eq!(tally(3), Tally::from_parts(0, 1, 0));
 		assert_eq!(Balances::usable_balance(1), 0);
 
 		assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(1), None, 3));
+		System::assert_last_event(tests::RuntimeEvent::Voting(Event::VoteRemoved {
+			who: 1,
+			vote: nay(10, 0),
+		}));
 		assert_eq!(tally(3), Tally::from_parts(0, 0, 0));
 
 		assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), class(3), 1));
+		System::assert_last_event(tests::RuntimeEvent::Voting(Event::VoteUnlocked {
+			who: 1,
+			class: class(3),
+		}));
 		assert_eq!(Balances::usable_balance(1), 10);
 	});
 }
@@ -267,15 +292,32 @@ fn basic_voting_works() {
 fn split_voting_works() {
 	new_test_ext().execute_with(|| {
 		assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, split(10, 0)));
+		System::assert_last_event(tests::RuntimeEvent::Voting(Event::Voted {
+			who: 1,
+			vote: split(10, 0),
+		}));
 		assert_eq!(tally(3), Tally::from_parts(1, 0, 10));
+
 		assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, split(5, 5)));
+		System::assert_last_event(tests::RuntimeEvent::Voting(Event::Voted {
+			who: 1,
+			vote: split(5, 5),
+		}));
 		assert_eq!(tally(3), Tally::from_parts(0, 0, 5));
 		assert_eq!(Balances::usable_balance(1), 0);
 
 		assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(1), None, 3));
+		System::assert_last_event(tests::RuntimeEvent::Voting(Event::VoteRemoved {
+			who: 1,
+			vote: split(5, 5),
+		}));
 		assert_eq!(tally(3), Tally::from_parts(0, 0, 0));
 
 		assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), class(3), 1));
+		System::assert_last_event(tests::RuntimeEvent::Voting(Event::VoteUnlocked {
+			who: 1,
+			class: class(3),
+		}));
 		assert_eq!(Balances::usable_balance(1), 10);
 	});
 }
@@ -284,25 +326,48 @@ fn split_voting_works() {
 fn abstain_voting_works() {
 	new_test_ext().execute_with(|| {
 		assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, split_abstain(0, 0, 10)));
+		System::assert_last_event(tests::RuntimeEvent::Voting(Event::Voted {
+			who: 1,
+			vote: split_abstain(0, 0, 10),
+		}));
 		assert_eq!(tally(3), Tally::from_parts(0, 0, 10));
-		assert_ok!(Voting::vote(RuntimeOrigin::signed(2), 3, split_abstain(0, 0, 20)));
-		assert_eq!(tally(3), Tally::from_parts(0, 0, 30));
-		assert_ok!(Voting::vote(RuntimeOrigin::signed(2), 3, split_abstain(10, 0, 10)));
-		assert_eq!(tally(3), Tally::from_parts(1, 0, 30));
+
+		assert_ok!(Voting::vote(RuntimeOrigin::signed(6), 3, split_abstain(10, 0, 20)));
+		System::assert_last_event(tests::RuntimeEvent::Voting(Event::Voted {
+			who: 6,
+			vote: split_abstain(10, 0, 20),
+		}));
+		assert_eq!(tally(3), Tally::from_parts(1, 0, 40));
+
+		assert_ok!(Voting::vote(RuntimeOrigin::signed(6), 3, split_abstain(0, 0, 40)));
+		System::assert_last_event(tests::RuntimeEvent::Voting(Event::Voted {
+			who: 6,
+			vote: split_abstain(0, 0, 40),
+		}));
+
+		assert_eq!(tally(3), Tally::from_parts(0, 0, 50));
 		assert_eq!(Balances::usable_balance(1), 0);
-		assert_eq!(Balances::usable_balance(2), 0);
+		assert_eq!(Balances::usable_balance(6), 20);
 
 		assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(1), None, 3));
-		assert_eq!(tally(3), Tally::from_parts(1, 0, 20));
-
-		assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(2), None, 3));
+		System::assert_last_event(tests::RuntimeEvent::Voting(Event::VoteRemoved {
+			who: 1,
+			vote: split_abstain(0, 0, 10),
+		}));
+		assert_eq!(tally(3), Tally::from_parts(0, 0, 40));
+
+		assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(6), Some(class(3)), 3));
+		System::assert_last_event(tests::RuntimeEvent::Voting(Event::VoteRemoved {
+			who: 6,
+			vote: split_abstain(0, 0, 40),
+		}));
 		assert_eq!(tally(3), Tally::from_parts(0, 0, 0));
 
 		assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), class(3), 1));
 		assert_eq!(Balances::usable_balance(1), 10);
-		assert_ok!(Voting::unlock(RuntimeOrigin::signed(2), class(3), 2));
-		assert_eq!(Balances::usable_balance(2), 20);
+		assert_ok!(Voting::unlock(RuntimeOrigin::signed(6), class(3), 6));
+		assert_eq!(Balances::usable_balance(6), 60);
 	});
 }
diff --git a/substrate/frame/conviction-voting/src/types.rs b/substrate/frame/conviction-voting/src/types.rs
index d6bbb678a14b..aa7dd578fbad 100644
--- a/substrate/frame/conviction-voting/src/types.rs
+++ b/substrate/frame/conviction-voting/src/types.rs
@@ -117,14 +117,9 @@ impl<
 	pub fn from_parts(
 		ayes_with_conviction: Votes,
 		nays_with_conviction: Votes,
-		ayes: Votes,
+		support: Votes,
 	) -> Self {
-		Self {
-			ayes: ayes_with_conviction,
-			nays: nays_with_conviction,
-			support: ayes,
-			dummy: PhantomData,
-		}
+		Self { ayes: ayes_with_conviction, nays: nays_with_conviction, support, dummy: PhantomData }
 	}
 
 	/// Add an account's vote into the tally.
diff --git a/substrate/frame/democracy/src/benchmarking.rs b/substrate/frame/democracy/src/benchmarking.rs
index ee36e9212f52..f9c810e56192 100644
--- a/substrate/frame/democracy/src/benchmarking.rs
+++ b/substrate/frame/democracy/src/benchmarking.rs
@@ -17,9 +17,11 @@
 //! Democracy pallet benchmarking.
 
+#![cfg(feature = "runtime-benchmarks")]
+
 use super::*;
 
-use frame_benchmarking::v1::{account, benchmarks, whitelist_account, BenchmarkError};
+use frame_benchmarking::v2::*;
 use frame_support::{
 	assert_noop, assert_ok,
 	traits::{Currency, EnsureOrigin, Get, OnInitialize, UnfilteredDispatchable},
@@ -94,11 +96,15 @@ fn note_preimage<T: Config>() -> T::Hash {
 	hash
 }
 
-benchmarks! {
-	propose {
+#[benchmarks]
+mod benchmarks {
+	use super::*;
+
+	#[benchmark]
+	fn propose() -> Result<(), BenchmarkError> {
 		let p = T::MaxProposals::get();
 
-		for i in 0 .. (p - 1) {
+		for i in 0..(p - 1) {
 			add_proposal::<T>(i)?;
 		}
 
@@ -106,18 +112,22 @@ benchmarks! {
 		let caller = funded_account::<T>("caller", 0);
 		let proposal = make_proposal::<T>(0);
 		let value = T::MinimumDeposit::get();
 		whitelist_account!(caller);
-	}: _(RawOrigin::Signed(caller), proposal, value)
-	verify {
+
+		#[extrinsic_call]
+		_(RawOrigin::Signed(caller), proposal, value);
+
 		assert_eq!(PublicProps::<T>::get().len(), p as usize, "Proposals not created.");
+		Ok(())
 	}
 
-	second {
+	#[benchmark]
+	fn second() -> Result<(), BenchmarkError> {
 		let caller = funded_account::<T>("caller", 0);
 		add_proposal::<T>(0)?;
 
 		// Create s existing "seconds"
 		// we must reserve one deposit for the `proposal` and one for our benchmarked `second` call.
-		for i in 0 .. T::MaxDeposits::get() - 2 {
+		for i in 0..T::MaxDeposits::get() - 2 {
 			let seconder = funded_account::<T>("seconder", i);
 			Democracy::<T>::second(RawOrigin::Signed(seconder).into(), 0)?;
 		}
 
@@ -125,20 +135,32 @@ benchmarks! {
 		let deposits = DepositOf::<T>::get(0).ok_or("Proposal not created")?;
 		assert_eq!(deposits.0.len(), (T::MaxDeposits::get() - 1) as usize, "Seconds not recorded");
 		whitelist_account!(caller);
-	}: _(RawOrigin::Signed(caller), 0)
-	verify {
+
+		#[extrinsic_call]
+		_(RawOrigin::Signed(caller), 0);
+
 		let deposits = DepositOf::<T>::get(0).ok_or("Proposal not created")?;
-		assert_eq!(deposits.0.len(), (T::MaxDeposits::get()) as usize, "`second` benchmark did not work");
+		assert_eq!(
+			deposits.0.len(),
+			(T::MaxDeposits::get()) as usize,
+			"`second` benchmark did not work"
+		);
+		Ok(())
 	}
 
-	vote_new {
+	#[benchmark]
+	fn vote_new() -> Result<(), BenchmarkError> {
 		let caller = funded_account::<T>("caller", 0);
 		let account_vote = account_vote::<T>(100u32.into());
 
 		// We need to create existing direct votes
-		for i in 0 .. T::MaxVotes::get() - 1 {
+		for i in 0..T::MaxVotes::get() - 1 {
 			let ref_index = add_referendum::<T>(i).0;
-			Democracy::<T>::vote(RawOrigin::Signed(caller.clone()).into(), ref_index, account_vote)?;
+			Democracy::<T>::vote(
+				RawOrigin::Signed(caller.clone()).into(),
+				ref_index,
+				account_vote,
+			)?;
 		}
 		let votes = match VotingOf::<T>::get(&caller) {
 			Voting::Direct { votes, .. } => votes,
@@ -148,23 +170,32 @@ benchmarks! {
 		let ref_index = add_referendum::<T>(T::MaxVotes::get() - 1).0;
 		whitelist_account!(caller);
-	}: vote(RawOrigin::Signed(caller.clone()), ref_index, account_vote)
-	verify {
+
+		#[extrinsic_call]
+		vote(RawOrigin::Signed(caller.clone()), ref_index, account_vote);
+
 		let votes = match VotingOf::<T>::get(&caller) {
 			Voting::Direct { votes, .. } => votes,
 			_ => return Err("Votes are not direct".into()),
 		};
+		assert_eq!(votes.len(), T::MaxVotes::get() as usize, "Vote was not recorded.");
+		Ok(())
 	}
 
-	vote_existing {
+	#[benchmark]
+	fn vote_existing() -> Result<(), BenchmarkError> {
 		let caller = funded_account::<T>("caller", 0);
 		let account_vote = account_vote::<T>(100u32.into());
 
 		// We need to create existing direct votes
 		for i in 0..T::MaxVotes::get() {
 			let ref_index = add_referendum::<T>(i).0;
-			Democracy::<T>::vote(RawOrigin::Signed(caller.clone()).into(), ref_index, account_vote)?;
+			Democracy::<T>::vote(
+				RawOrigin::Signed(caller.clone()).into(),
+				ref_index,
+				account_vote,
+			)?;
 		}
 		let votes = match VotingOf::<T>::get(&caller) {
 			Voting::Direct { votes, .. } => votes,
@@ -179,43 +210,50 @@ benchmarks! {
 		// This tests when a user changes a vote
 		whitelist_account!(caller);
-	}: vote(RawOrigin::Signed(caller.clone()), ref_index, new_vote)
-	verify {
+
+		#[extrinsic_call]
+		vote(RawOrigin::Signed(caller.clone()), ref_index, new_vote);
+
 		let votes = match VotingOf::<T>::get(&caller) {
 			Voting::Direct { votes, .. } => votes,
 			_ => return Err("Votes are not direct".into()),
 		};
 		assert_eq!(votes.len(), T::MaxVotes::get() as usize, "Vote was incorrectly added");
-		let referendum_info = ReferendumInfoOf::<T>::get(ref_index)
-			.ok_or("referendum doesn't exist")?;
-		let tally = match referendum_info {
+		let referendum_info =
+			ReferendumInfoOf::<T>::get(ref_index).ok_or("referendum doesn't exist")?;
+		let tally = match referendum_info {
 			ReferendumInfo::Ongoing(r) => r.tally,
 			_ => return Err("referendum not ongoing".into()),
 		};
 		assert_eq!(tally.nays, 1000u32.into(), "changed vote was not recorded");
+		Ok(())
 	}
 
-	emergency_cancel {
-		let origin =
-			T::CancellationOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?;
+	#[benchmark]
+	fn emergency_cancel() -> Result<(), BenchmarkError> {
+		let origin = T::CancellationOrigin::try_successful_origin()
+			.map_err(|_| BenchmarkError::Weightless)?;
 		let (ref_index, _, preimage_hash) = add_referendum::<T>(0);
 		assert_ok!(Democracy::<T>::referendum_status(ref_index));
-	}: _(origin, ref_index)
-	verify {
+
+		#[extrinsic_call]
+		_(origin as T::RuntimeOrigin, ref_index);
 		// Referendum has been canceled
-		assert_noop!(
-			Democracy::<T>::referendum_status(ref_index),
-			Error::<T>::ReferendumInvalid,
+		assert_noop!(Democracy::<T>::referendum_status(ref_index), Error::<T>::ReferendumInvalid,);
+		assert_last_event::<T>(
+			crate::Event::MetadataCleared {
+				owner: MetadataOwner::Referendum(ref_index),
+				hash: preimage_hash,
+			}
+			.into(),
 		);
-		assert_last_event::<T>(crate::Event::MetadataCleared {
-			owner: MetadataOwner::Referendum(ref_index),
-			hash: preimage_hash,
-		}.into());
+		Ok(())
 	}
 
-	blacklist {
+	#[benchmark]
+	fn blacklist() -> Result<(), BenchmarkError> {
 		// Place our proposal at the end to make sure it's worst case.
-		for i in 0 .. T::MaxProposals::get() - 1 {
+		for i in 0..T::MaxProposals::get() - 1 {
 			add_proposal::<T>(i)?;
 		}
 		// We should really add a lot of seconds here, but we're not doing it elsewhere.
@@ -231,21 +269,24 @@ benchmarks! {
 		));
 		let origin =
 			T::BlacklistOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?;
-	}: _(origin, hash, Some(ref_index))
-	verify {
+		#[extrinsic_call]
+		_(origin as T::RuntimeOrigin, hash, Some(ref_index));
+
 		// Referendum has been canceled
-		assert_noop!(
-			Democracy::<T>::referendum_status(ref_index),
-			Error::<T>::ReferendumInvalid
+		assert_noop!(Democracy::<T>::referendum_status(ref_index), Error::<T>::ReferendumInvalid);
+		assert_has_event::<T>(
+			crate::Event::MetadataCleared {
+				owner: MetadataOwner::Referendum(ref_index),
+				hash: preimage_hash,
+			}
+			.into(),
 		);
-		assert_has_event::<T>(crate::Event::MetadataCleared {
-			owner: MetadataOwner::Referendum(ref_index),
-			hash: preimage_hash,
-		}.into());
+		Ok(())
 	}
 
 	// Worst case scenario, we external propose a previously blacklisted proposal
-	external_propose {
+	#[benchmark]
+	fn external_propose() -> Result<(), BenchmarkError> {
 		let origin =
 			T::ExternalOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?;
 		let proposal = make_proposal::<T>(0);
@@ -258,33 +299,42 @@ benchmarks! {
 			.try_into()
 			.unwrap();
 		Blacklist::<T>::insert(proposal.hash(), (BlockNumberFor::<T>::zero(), addresses));
-	}: _(origin, proposal)
-	verify {
+		#[extrinsic_call]
+		_(origin as T::RuntimeOrigin, proposal);
+
 		// External proposal created
 		ensure!(NextExternal::<T>::exists(), "External proposal didn't work");
+		Ok(())
 	}
 
-	external_propose_majority {
+	#[benchmark]
+	fn external_propose_majority() -> Result<(), BenchmarkError> {
 		let origin = T::ExternalMajorityOrigin::try_successful_origin()
 			.map_err(|_| BenchmarkError::Weightless)?;
 		let proposal = make_proposal::<T>(0);
-	}: _(origin, proposal)
-	verify {
+		#[extrinsic_call]
+		_(origin as T::RuntimeOrigin, proposal);
+
 		// External proposal created
 		ensure!(NextExternal::<T>::exists(), "External proposal didn't work");
+		Ok(())
 	}
 
-	external_propose_default {
+	#[benchmark]
+	fn external_propose_default() -> Result<(), BenchmarkError> {
 		let origin = T::ExternalDefaultOrigin::try_successful_origin()
 			.map_err(|_| BenchmarkError::Weightless)?;
 		let proposal = make_proposal::<T>(0);
-	}: _(origin, proposal)
-	verify {
+		#[extrinsic_call]
+		_(origin as T::RuntimeOrigin, proposal);
+
 		// External proposal created
 		ensure!(NextExternal::<T>::exists(), "External proposal didn't work");
+		Ok(())
 	}
 
-	fast_track {
+	#[benchmark]
+	fn fast_track() -> Result<(), BenchmarkError> {
 		let origin_propose = T::ExternalDefaultOrigin::try_successful_origin()
 			.expect("ExternalDefaultOrigin has no successful origin required for the benchmark");
 		let proposal = make_proposal::<T>(0);
@@ -295,23 +345,30 @@ benchmarks! {
 		assert_ok!(Democracy::<T>::set_metadata(
 			origin_propose,
 			MetadataOwner::External,
-			Some(preimage_hash)));
+			Some(preimage_hash)
+		));
 
 		// NOTE: Instant origin may invoke a little bit more logic, but may not always succeed.
 		let origin_fast_track =
 			T::FastTrackOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?;
 		let voting_period = T::FastTrackVotingPeriod::get();
 		let delay = 0u32;
-	}: _(origin_fast_track, proposal_hash, voting_period, delay.into())
-	verify {
+		#[extrinsic_call]
+		_(origin_fast_track as T::RuntimeOrigin, proposal_hash, voting_period, delay.into());
+
 		assert_eq!(ReferendumCount::<T>::get(), 1, "referendum not created");
-		assert_last_event::<T>(crate::Event::MetadataTransferred {
-			prev_owner: MetadataOwner::External,
-			owner: MetadataOwner::Referendum(0),
-			hash: preimage_hash,
-		}.into());
+		assert_last_event::<T>(
+			crate::Event::MetadataTransferred {
+				prev_owner: MetadataOwner::External,
+				owner: MetadataOwner::Referendum(0),
+				hash: preimage_hash,
+			}
+			.into(),
+		);
+		Ok(())
 	}
 
-	veto_external {
+	#[benchmark]
+	fn veto_external() -> Result<(), BenchmarkError> {
 		let proposal = make_proposal::<T>(0);
 		let proposal_hash = proposal.hash();
 
@@ -323,28 +380,32 @@ benchmarks! {
 		assert_ok!(Democracy::<T>::set_metadata(
 			origin_propose,
 			MetadataOwner::External,
-			Some(preimage_hash))
-		);
+			Some(preimage_hash)
+		));
 
 		let mut vetoers: BoundedVec<T::AccountId, _> = Default::default();
-		for i in 0 .. (T::MaxBlacklisted::get() - 1) {
+		for i in 0..(T::MaxBlacklisted::get() - 1) {
 			vetoers.try_push(account::<T::AccountId>("vetoer", i, SEED)).unwrap();
 		}
 		vetoers.sort();
 		Blacklist::<T>::insert(proposal_hash, (BlockNumberFor::<T>::zero(), vetoers));
 
-		let origin = T::VetoOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?;
+		let origin =
+			T::VetoOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?;
 		ensure!(NextExternal::<T>::get().is_some(), "no external proposal");
-	}: _(origin, proposal_hash)
-	verify {
+		#[extrinsic_call]
+		_(origin as T::RuntimeOrigin, proposal_hash);
+
 		assert!(NextExternal::<T>::get().is_none());
 		let (_, new_vetoers) = Blacklist::<T>::get(&proposal_hash).ok_or("no blacklist")?;
 		assert_eq!(new_vetoers.len(), T::MaxBlacklisted::get() as usize, "vetoers not added");
+		Ok(())
 	}
 
-	cancel_proposal {
+	#[benchmark]
+	fn cancel_proposal() -> Result<(), BenchmarkError> {
 		// Place our proposal at the end to make sure it's worst case.
-		for i in 0 .. T::MaxProposals::get() {
+		for i in 0..T::MaxProposals::get() {
 			add_proposal::<T>(i)?;
 		}
 		// Add metadata to the first proposal.
@@ -353,31 +414,41 @@ benchmarks! {
 		assert_ok!(Democracy::<T>::set_metadata(
 			RawOrigin::Signed(proposer).into(),
 			MetadataOwner::Proposal(0),
-			Some(preimage_hash)));
+			Some(preimage_hash)
+		));
 		let cancel_origin = T::CancelProposalOrigin::try_successful_origin()
 			.map_err(|_| BenchmarkError::Weightless)?;
-	}: _(cancel_origin, 0)
-	verify {
-		assert_last_event::<T>(crate::Event::MetadataCleared {
-			owner: MetadataOwner::Proposal(0),
-			hash: preimage_hash,
-		}.into());
+		#[extrinsic_call]
+		_(cancel_origin as T::RuntimeOrigin, 0);
+
+		assert_last_event::<T>(
+			crate::Event::MetadataCleared {
+				owner: MetadataOwner::Proposal(0),
+				hash: preimage_hash,
+			}
+			.into(),
+		);
+		Ok(())
 	}
 
-	cancel_referendum {
+	#[benchmark]
+	fn cancel_referendum() -> Result<(), BenchmarkError> {
 		let (ref_index, _, preimage_hash) = add_referendum::<T>(0);
-	}: _(RawOrigin::Root, ref_index)
-	verify {
-		assert_last_event::<T>(crate::Event::MetadataCleared {
-			owner: MetadataOwner::Referendum(0),
-			hash: preimage_hash,
-		}.into());
-	}
+		#[extrinsic_call]
+		_(RawOrigin::Root, ref_index);
 
-	#[extra]
-	on_initialize_external {
-		let r in 0 .. REFERENDUM_COUNT_HINT;
+		assert_last_event::<T>(
+			crate::Event::MetadataCleared {
+				owner: MetadataOwner::Referendum(0),
+				hash: preimage_hash,
+			}
+			.into(),
+		);
+		Ok(())
+	}
 
+	#[benchmark(extra)]
+	fn on_initialize_external(r: Linear<0, REFERENDUM_COUNT_HINT>) -> Result<(), BenchmarkError> {
 		for i in 0..r {
 			add_referendum::<T>(i);
 		}
@@ -397,14 +468,17 @@ benchmarks! {
 
 		let block_number = T::LaunchPeriod::get();
 
-	}: { Democracy::<T>::on_initialize(block_number) }
-	verify {
+		#[block]
+		{
+			Democracy::<T>::on_initialize(block_number);
+		}
+
 		// One extra because of next external
 		assert_eq!(ReferendumCount::<T>::get(), r + 1, "referenda not created");
 		ensure!(!NextExternal::<T>::exists(), "External wasn't taken");
 
 		// All but the new next external should be finished
-		for i in 0 .. r {
+		for i in 0..r {
 			if let Some(value) = ReferendumInfoOf::<T>::get(i) {
 				match value {
 					ReferendumInfo::Finished { .. } => (),
@@ -412,12 +486,13 @@ benchmarks! {
 			}
 		}
+		Ok(())
 	}
 
-	#[extra]
-	on_initialize_public {
-		let r in 0 .. (T::MaxVotes::get() - 1);
-
+	#[benchmark(extra)]
+	fn on_initialize_public(
+		r: Linear<0, { T::MaxVotes::get() - 1 }>,
+	) -> Result<(), BenchmarkError> {
 		for i in 0..r {
 			add_referendum::<T>(i);
 		}
@@ -430,13 +505,16 @@ benchmarks! {
 
 		let block_number = T::LaunchPeriod::get();
 
-	}: { Democracy::<T>::on_initialize(block_number) }
-	verify {
+		#[block]
+		{
+			Democracy::<T>::on_initialize(block_number);
+		}
+
 		// One extra because of next public
 		assert_eq!(ReferendumCount::<T>::get(), r + 1, "proposal not accepted");
 
 		// All should be finished
-		for i in 0 .. r {
+		for i in 0..r {
 			if let Some(value) = ReferendumInfoOf::<T>::get(i) {
 				match value {
 					ReferendumInfo::Finished { .. } => (),
@@ -444,12 +522,12 @@ benchmarks! {
 			}
 		}
+		Ok(())
 	}
 
 	// No launch no maturing referenda.
-	on_initialize_base {
-		let r in 0 .. (T::MaxVotes::get() - 1);
-
+	#[benchmark]
+	fn on_initialize_base(r: Linear<0, { T::MaxVotes::get() - 1 }>) -> Result<(), BenchmarkError> {
 		for i in 0..r {
 			add_referendum::<T>(i);
 		}
@@ -464,22 +542,28 @@ benchmarks! {
 
 		assert_eq!(ReferendumCount::<T>::get(), r, "referenda not created");
 		assert_eq!(LowestUnbaked::<T>::get(), 0, "invalid referenda init");
 
-	}: { Democracy::<T>::on_initialize(1u32.into()) }
-	verify {
+		#[block]
+		{
+			Democracy::<T>::on_initialize(1u32.into());
+		}
+
 		// All should be on going
-		for i in 0 .. r {
+		for i in 0..r {
 			if let Some(value) = ReferendumInfoOf::<T>::get(i) {
 				match value {
-					ReferendumInfo::Finished { .. } => return Err("Referendum has been finished".into()),
+					ReferendumInfo::Finished { .. } =>
+						return Err("Referendum has been finished".into()),
 					ReferendumInfo::Ongoing(_) => (),
 				}
 			}
 		}
+		Ok(())
 	}
 
-	on_initialize_base_with_launch_period {
-		let r in 0 .. (T::MaxVotes::get() - 1);
-
+	#[benchmark]
+	fn on_initialize_base_with_launch_period(
+		r: Linear<0, { T::MaxVotes::get() - 1 }>,
+	) -> Result<(), BenchmarkError> {
 		for i in 0..r {
 			add_referendum::<T>(i);
 		}
@@ -496,22 +580,26 @@ benchmarks! {
 
 		let block_number = T::LaunchPeriod::get();
 
-	}: { Democracy::<T>::on_initialize(block_number) }
-	verify {
+		#[block]
+		{
+			Democracy::<T>::on_initialize(block_number);
+		}
+
 		// All should be on going
-		for i in 0 .. r {
+		for i in 0..r {
 			if let Some(value) = ReferendumInfoOf::<T>::get(i) {
 				match value {
-					ReferendumInfo::Finished { .. } => return Err("Referendum has been finished".into()),
+					ReferendumInfo::Finished { .. } =>
+						return Err("Referendum has been finished".into()),
 					ReferendumInfo::Ongoing(_) => (),
 				}
 			}
 		}
+		Ok(())
 	}
 
-	delegate {
-		let r in 0 .. (T::MaxVotes::get() - 1);
-
+	#[benchmark]
+	fn delegate(r: Linear<0, { T::MaxVotes::get() - 1 }>) -> Result<(), BenchmarkError> {
 		let initial_balance: BalanceOf<T> = 100u32.into();
 		let delegated_balance: BalanceOf<T> = 1000u32.into();
 
@@ -538,7 +626,11 @@ benchmarks! {
 		// We need to create existing direct votes for the `new_delegate`
 		for i in 0..r {
 			let ref_index = add_referendum::<T>(i).0;
-			Democracy::<T>::vote(RawOrigin::Signed(new_delegate.clone()).into(), ref_index, account_vote)?;
+			Democracy::<T>::vote(
+				RawOrigin::Signed(new_delegate.clone()).into(),
+				ref_index,
+				account_vote,
+			)?;
 		}
 		let votes = match VotingOf::<T>::get(&new_delegate) {
 			Voting::Direct { votes, .. } => votes,
@@ -546,8 +638,15 @@ benchmarks! {
 		};
 		assert_eq!(votes.len(), r as usize, "Votes were not recorded.");
 		whitelist_account!(caller);
-	}: _(RawOrigin::Signed(caller.clone()), new_delegate_lookup, Conviction::Locked1x, delegated_balance)
-	verify {
+
+		#[extrinsic_call]
+		_(
+			RawOrigin::Signed(caller.clone()),
+			new_delegate_lookup,
+			Conviction::Locked1x,
+			delegated_balance,
+		);
+
 		let (target, balance) = match VotingOf::<T>::get(&caller) {
 			Voting::Delegating { target, balance, .. } => (target, balance),
 			_ => return Err("Votes are not direct".into()),
@@ -559,11 +658,11 @@ benchmarks! {
 			_ => return Err("Votes are not direct".into()),
 		};
 		assert_eq!(delegations.capital, delegated_balance, "delegation was not recorded.");
+		Ok(())
 	}
 
-	undelegate {
-		let r in 0 .. (T::MaxVotes::get() - 1);
-
+	#[benchmark]
+	fn undelegate(r: Linear<0, { T::MaxVotes::get() - 1 }>) -> Result<(), BenchmarkError> {
 		let initial_balance: BalanceOf<T> = 100u32.into();
 		let delegated_balance: BalanceOf<T> = 1000u32.into();
 
@@ -590,7 +689,7 @@ benchmarks! {
 			Democracy::<T>::vote(
 				RawOrigin::Signed(the_delegate.clone()).into(),
 				ref_index,
-				account_vote
+				account_vote,
 			)?;
 		}
 		let votes = match VotingOf::<T>::get(&the_delegate) {
@@ -599,31 +698,38 @@ benchmarks! {
 		};
 		assert_eq!(votes.len(), r as usize, "Votes were not recorded.");
 		whitelist_account!(caller);
-	}: _(RawOrigin::Signed(caller.clone()))
-	verify {
+
+		#[extrinsic_call]
+		_(RawOrigin::Signed(caller.clone()));
+
 		// Voting should now be direct
 		match VotingOf::<T>::get(&caller) {
 			Voting::Direct { .. } => (),
 			_ => return Err("undelegation failed".into()),
 		}
+		Ok(())
 	}
 
-	clear_public_proposals {
+	#[benchmark]
+	fn clear_public_proposals() -> Result<(), BenchmarkError> {
 		add_proposal::<T>(0)?;
-	}: _(RawOrigin::Root)
 
-	// Test when unlock will remove locks
-	unlock_remove {
-		let r in 0 .. (T::MaxVotes::get() - 1);
+		#[extrinsic_call]
+		_(RawOrigin::Root);
 
+		Ok(())
+	}
+
+	// Test when unlock will remove locks
+	#[benchmark]
+	fn unlock_remove(r: Linear<0, { T::MaxVotes::get() - 1 }>) -> Result<(), BenchmarkError> {
 		let locker = funded_account::<T>("locker", 0);
 		let locker_lookup = T::Lookup::unlookup(locker.clone());
 		// Populate votes so things are locked
 		let base_balance: BalanceOf<T> = 100u32.into();
 		let small_vote = account_vote::<T>(base_balance);
 		// Vote and immediately unvote
-		for i in 0 .. r {
+		for i in 0..r {
 			let ref_index = add_referendum::<T>(i).0;
 			Democracy::<T>::vote(RawOrigin::Signed(locker.clone()).into(), ref_index, small_vote)?;
 			Democracy::<T>::remove_vote(RawOrigin::Signed(locker.clone()).into(), ref_index)?;
@@ -631,23 +737,25 @@ benchmarks! {
 
 		let caller = funded_account::<T>("caller", 0);
 		whitelist_account!(caller);
-	}: unlock(RawOrigin::Signed(caller), locker_lookup)
-	verify {
+
+		#[extrinsic_call]
+		unlock(RawOrigin::Signed(caller), locker_lookup);
+
 		// Note that we may want to add a `get_lock` api to actually verify
 		let voting = VotingOf::<T>::get(&locker);
 		assert_eq!(voting.locked_balance(), BalanceOf::<T>::zero());
+		Ok(())
 	}
 
 	// Test when unlock will set a new value
-	unlock_set {
-		let r in 0 .. (T::MaxVotes::get() - 1);
-
+	#[benchmark]
+	fn unlock_set(r: Linear<0, { T::MaxVotes::get() - 1 }>) -> Result<(), BenchmarkError> {
 		let locker = funded_account::<T>("locker", 0);
 		let locker_lookup = T::Lookup::unlookup(locker.clone());
 		// Populate votes so things are locked
 		let base_balance: BalanceOf<T> = 100u32.into();
 		let small_vote = account_vote::<T>(base_balance);
-		for i in 0 .. r {
+		for i in 0..r {
 			let ref_index = add_referendum::<T>(i).0;
 			Democracy::<T>::vote(RawOrigin::Signed(locker.clone()).into(), ref_index, small_vote)?;
 		}
@@ -670,8 +778,10 @@ benchmarks! {
 
 		let caller = funded_account::<T>("caller", 0);
 		whitelist_account!(caller);
-	}: unlock(RawOrigin::Signed(caller), locker_lookup)
-	verify {
+
+		#[extrinsic_call]
+		unlock(RawOrigin::Signed(caller), locker_lookup);
+
 		let votes = match VotingOf::<T>::get(&locker) {
 			Voting::Direct { votes, .. } => votes,
 			_ => return Err("Votes are not direct".into()),
 		};
@@ -681,17 +791,21 @@ benchmarks! {
 		let voting = VotingOf::<T>::get(&locker);
 		// Note that we may want to add a `get_lock` api to actually verify
 		assert_eq!(voting.locked_balance(), if r > 0 { base_balance } else { 0u32.into() });
+		Ok(())
 	}
 
-	remove_vote {
-		let r in 1 .. T::MaxVotes::get();
-
+	#[benchmark]
+	fn remove_vote(r: Linear<1, { T::MaxVotes::get() }>) -> Result<(), BenchmarkError> {
 		let caller = funded_account::<T>("caller", 0);
 		let account_vote = account_vote::<T>(100u32.into());
 
-		for i in 0 .. r {
+		for i in 0..r {
 			let ref_index = add_referendum::<T>(i).0;
-			Democracy::<T>::vote(RawOrigin::Signed(caller.clone()).into(), ref_index, account_vote)?;
+			Democracy::<T>::vote(
+				RawOrigin::Signed(caller.clone()).into(),
+				ref_index,
+				account_vote,
+			)?;
 		}
 
 		let votes = match VotingOf::<T>::get(&caller) {
@@ -702,26 +816,32 @@ benchmarks! {
 
 		let ref_index = r - 1;
 		whitelist_account!(caller);
-	}: _(RawOrigin::Signed(caller.clone()), ref_index)
-	verify {
+
+		#[extrinsic_call]
+		_(RawOrigin::Signed(caller.clone()), ref_index);
+
 		let votes = match VotingOf::<T>::get(&caller) {
 			Voting::Direct { votes, .. } => votes,
 			_ => return Err("Votes are not direct".into()),
 		};
 		assert_eq!(votes.len(), (r - 1) as usize, "Vote was not removed");
+		Ok(())
 	}
 
 	// Worst case is when target == caller and referendum is ongoing
-	remove_other_vote {
-		let r in 1 .. T::MaxVotes::get();
-
+	#[benchmark]
+	fn remove_other_vote(r: Linear<1, { T::MaxVotes::get() }>) -> Result<(), BenchmarkError> {
 		let caller = funded_account::<T>("caller", r);
 		let caller_lookup = T::Lookup::unlookup(caller.clone());
 		let account_vote = account_vote::<T>(100u32.into());
 
-		for i in 0 .. r {
+		for i in 0..r {
 			let ref_index = add_referendum::<T>(i).0;
-			Democracy::<T>::vote(RawOrigin::Signed(caller.clone()).into(), ref_index, account_vote)?;
+			Democracy::<T>::vote(
+				RawOrigin::Signed(caller.clone()).into(),
+				ref_index,
+				account_vote,
+			)?;
 		}
 
 		let votes = match VotingOf::<T>::get(&caller) {
@@ -732,68 +852,71 @@ benchmarks! {
 
 		let ref_index = r - 1;
 		whitelist_account!(caller);
-	}: _(RawOrigin::Signed(caller.clone()), caller_lookup, ref_index)
-	verify {
+
+		#[extrinsic_call]
+		_(RawOrigin::Signed(caller.clone()), caller_lookup, ref_index);
+
 		let votes = match VotingOf::<T>::get(&caller) {
 			Voting::Direct { votes, .. } => votes,
 			_ => return Err("Votes are not direct".into()),
 		};
 		assert_eq!(votes.len(), (r - 1) as usize, "Vote was not removed");
+		Ok(())
 	}
 
-	set_external_metadata {
+	#[benchmark]
+	fn set_external_metadata() -> Result<(), BenchmarkError> {
 		let origin = T::ExternalOrigin::try_successful_origin()
 			.expect("ExternalOrigin has no successful origin required for the benchmark");
-		assert_ok!(
-			Democracy::<T>::external_propose(origin.clone(), make_proposal::<T>(0))
-		);
+		assert_ok!(Democracy::<T>::external_propose(origin.clone(), make_proposal::<T>(0)));
 		let owner = MetadataOwner::External;
 		let hash = note_preimage::<T>();
-	}: set_metadata(origin, owner.clone(), Some(hash))
-	verify {
-		assert_last_event::<T>(crate::Event::MetadataSet {
-			owner,
-			hash,
-		}.into());
+
+		#[extrinsic_call]
+		set_metadata(origin as T::RuntimeOrigin, owner.clone(), Some(hash));
+
+		assert_last_event::<T>(crate::Event::MetadataSet { owner, hash }.into());
+		Ok(())
 	}
 
-	clear_external_metadata {
+	#[benchmark]
+	fn clear_external_metadata() -> Result<(), BenchmarkError> {
 		let origin = T::ExternalOrigin::try_successful_origin()
 			.expect("ExternalOrigin has no successful origin required for the benchmark");
-		assert_ok!(
-			Democracy::<T>::external_propose(origin.clone(), make_proposal::<T>(0))
-		);
+		assert_ok!(Democracy::<T>::external_propose(origin.clone(), make_proposal::<T>(0)));
 		let owner = MetadataOwner::External;
-		let proposer = funded_account::<T>("proposer", 0);
+		let _proposer = funded_account::<T>("proposer", 0);
 		let hash = note_preimage::<T>();
 		assert_ok!(Democracy::<T>::set_metadata(origin.clone(), owner.clone(), Some(hash)));
-	}: set_metadata(origin, owner.clone(), None)
-	verify {
-		assert_last_event::<T>(crate::Event::MetadataCleared {
-			owner,
-			hash,
-		}.into());
+
+		#[extrinsic_call]
+		set_metadata(origin as T::RuntimeOrigin, owner.clone(), None);
+
+		assert_last_event::<T>(crate::Event::MetadataCleared { owner, hash }.into());
+		Ok(())
 	}
 
-	set_proposal_metadata {
+	#[benchmark]
+	fn set_proposal_metadata() -> Result<(), BenchmarkError> {
 		// Place our proposal at the end to make sure it's worst case.
-		for i in 0 .. T::MaxProposals::get() {
+		for i in 0..T::MaxProposals::get() {
 			add_proposal::<T>(i)?;
 		}
 		let owner = MetadataOwner::Proposal(0);
 		let proposer = funded_account::<T>("proposer", 0);
 		let hash = note_preimage::<T>();
-	}: set_metadata(RawOrigin::Signed(proposer).into(), owner.clone(), Some(hash))
-	verify {
-		assert_last_event::<T>(crate::Event::MetadataSet {
-			owner,
-			hash,
-		}.into());
+
+		#[extrinsic_call]
+		set_metadata(RawOrigin::Signed(proposer), owner.clone(), Some(hash));
+
+		assert_last_event::<T>(crate::Event::MetadataSet { owner, hash }.into());
+		Ok(())
 	}
 
-	clear_proposal_metadata {
+	#[benchmark]
+	fn clear_proposal_metadata() -> Result<(), BenchmarkError> {
 		// Place our proposal at the end to make sure it's worst case.
-		for i in 0 .. T::MaxProposals::get() {
+		for i in 0..T::MaxProposals::get() {
 			add_proposal::<T>(i)?;
 		}
 		let proposer = funded_account::<T>("proposer", 0);
@@ -802,33 +925,36 @@ benchmarks! {
 		assert_ok!(Democracy::<T>::set_metadata(
 			RawOrigin::Signed(proposer.clone()).into(),
 			owner.clone(),
-			Some(hash)));
-	}: set_metadata(RawOrigin::Signed(proposer).into(), owner.clone(), None)
-	verify {
-		assert_last_event::<T>(crate::Event::MetadataCleared {
-			owner,
-			hash,
-		}.into());
+			Some(hash)
+		));
+
+		#[extrinsic_call]
+		set_metadata::<T>(RawOrigin::Signed(proposer), owner.clone(), None);
+
+		assert_last_event::<T>(crate::Event::MetadataCleared { owner, hash }.into());
+		Ok(())
 	}
 
-	set_referendum_metadata {
+	#[benchmark]
+	fn set_referendum_metadata() -> Result<(), BenchmarkError> {
 		// create not ongoing referendum.
 		ReferendumInfoOf::<T>::insert(
 			0,
 			ReferendumInfo::Finished { end: BlockNumberFor::<T>::zero(), approved: true },
 		);
 		let owner = MetadataOwner::Referendum(0);
-		let caller = funded_account::<T>("caller", 0);
+		let _caller = funded_account::<T>("caller", 0);
 		let hash = note_preimage::<T>();
-	}: set_metadata(RawOrigin::Root.into(), owner.clone(), Some(hash))
-	verify {
-		assert_last_event::<T>(crate::Event::MetadataSet {
-			owner,
-			hash,
-		}.into());
+
+		#[extrinsic_call]
+		set_metadata::<T>(RawOrigin::Root, owner.clone(), Some(hash));
+
+		assert_last_event::<T>(crate::Event::MetadataSet { owner, hash }.into());
+		Ok(())
 	}
 
-	clear_referendum_metadata {
+	#[benchmark]
+	fn clear_referendum_metadata() -> Result<(), BenchmarkError> {
 		// create not ongoing referendum.
 		ReferendumInfoOf::<T>::insert(
 			0,
@@ -838,17 +964,13 @@ benchmarks! {
 		let hash = note_preimage::<T>();
 		MetadataOf::<T>::insert(owner.clone(), hash);
 		let caller = funded_account::<T>("caller", 0);
-	}: set_metadata(RawOrigin::Signed(caller).into(), owner.clone(), None)
-	verify {
-		assert_last_event::<T>(crate::Event::MetadataCleared {
-			owner,
-			hash,
-		}.into());
+
+		#[extrinsic_call]
+		set_metadata::<T>(RawOrigin::Signed(caller), owner.clone(), None);
+
+		assert_last_event::<T>(crate::Event::MetadataCleared { owner, hash }.into());
+		Ok(())
 	}
 
-	impl_benchmark_test_suite!(
-		Democracy,
-		crate::tests::new_test_ext(),
-		crate::tests::Test
-	);
+	impl_benchmark_test_suite!(Democracy, crate::tests::new_test_ext(), crate::tests::Test);
 }
diff --git a/substrate/frame/democracy/src/weights.rs b/substrate/frame/democracy/src/weights.rs
index 0a2200a78b5d..765ee57f0eb3 100644
--- a/substrate/frame/democracy/src/weights.rs
+++ b/substrate/frame/democracy/src/weights.rs
@@ -18,27 +18,25 @@
 //! Autogenerated weights for `pallet_democracy`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-11-19, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
 //! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //!
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate-node +// target/production/substrate-node // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_democracy -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --output=./substrate/frame/democracy/src/weights.rs +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_democracy +// --chain=dev // --header=./substrate/HEADER-APACHE2 +// --output=./substrate/frame/democracy/src/weights.rs // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -96,8 +94,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4834` // Estimated: `18187` - // Minimum execution time: 48_991_000 picoseconds. - Weight::from_parts(50_476_000, 18187) + // Minimum execution time: 49_681_000 picoseconds. + Weight::from_parts(51_578_000, 18187) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -107,8 +105,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3589` // Estimated: `6695` - // Minimum execution time: 43_129_000 picoseconds. - Weight::from_parts(45_076_000, 6695) + // Minimum execution time: 45_001_000 picoseconds. + Weight::from_parts(45_990_000, 6695) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -124,8 +122,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3503` // Estimated: `7260` - // Minimum execution time: 63_761_000 picoseconds. - Weight::from_parts(65_424_000, 7260) + // Minimum execution time: 65_095_000 picoseconds. + Weight::from_parts(67_484_000, 7260) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -141,8 +139,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3525` // Estimated: `7260` - // Minimum execution time: 66_543_000 picoseconds. - Weight::from_parts(69_537_000, 7260) + // Minimum execution time: 66_877_000 picoseconds. + Weight::from_parts(68_910_000, 7260) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -156,8 +154,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `399` // Estimated: `3666` - // Minimum execution time: 28_934_000 picoseconds. - Weight::from_parts(29_982_000, 3666) + // Minimum execution time: 29_312_000 picoseconds. + Weight::from_parts(30_040_000, 3666) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -179,8 +177,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `5943` // Estimated: `18187` - // Minimum execution time: 108_004_000 picoseconds. - Weight::from_parts(110_779_000, 18187) + // Minimum execution time: 107_932_000 picoseconds. + Weight::from_parts(108_940_000, 18187) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -192,8 +190,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3449` // Estimated: `6703` - // Minimum execution time: 17_630_000 picoseconds. - Weight::from_parts(18_419_000, 6703) + // Minimum execution time: 17_703_000 picoseconds. 
+ Weight::from_parts(18_188_000, 6703) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -203,8 +201,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_572_000 picoseconds. - Weight::from_parts(2_810_000, 0) + // Minimum execution time: 2_672_000 picoseconds. + Weight::from_parts(2_814_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Democracy::NextExternal` (r:0 w:1) @@ -213,8 +211,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_628_000 picoseconds. - Weight::from_parts(2_724_000, 0) + // Minimum execution time: 2_584_000 picoseconds. + Weight::from_parts(2_846_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Democracy::NextExternal` (r:1 w:1) @@ -229,8 +227,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `319` // Estimated: `3518` - // Minimum execution time: 24_624_000 picoseconds. - Weight::from_parts(25_518_000, 3518) + // Minimum execution time: 24_603_000 picoseconds. + Weight::from_parts(25_407_000, 3518) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -244,8 +242,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3552` // Estimated: `6703` - // Minimum execution time: 31_786_000 picoseconds. - Weight::from_parts(32_786_000, 6703) + // Minimum execution time: 31_721_000 picoseconds. + Weight::from_parts(32_785_000, 6703) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -261,8 +259,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `5854` // Estimated: `18187` - // Minimum execution time: 87_352_000 picoseconds. - Weight::from_parts(89_670_000, 18187) + // Minimum execution time: 86_981_000 picoseconds. + Weight::from_parts(89_140_000, 18187) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -274,8 +272,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `304` // Estimated: `3518` - // Minimum execution time: 17_561_000 picoseconds. - Weight::from_parts(18_345_000, 3518) + // Minimum execution time: 17_465_000 picoseconds. + Weight::from_parts(18_018_000, 3518) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -290,10 +288,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `277 + r * (86 ±0)` // Estimated: `1489 + r * (2676 ±0)` - // Minimum execution time: 6_801_000 picoseconds. - Weight::from_parts(8_524_132, 1489) - // Standard Error: 10_028 - .saturating_add(Weight::from_parts(4_073_619, 0).saturating_mul(r.into())) + // Minimum execution time: 6_746_000 picoseconds. + Weight::from_parts(7_381_932, 1489) + // Standard Error: 10_311 + .saturating_add(Weight::from_parts(4_107_935, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -316,10 +314,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `277 + r * (86 ±0)` // Estimated: `18187 + r * (2676 ±0)` - // Minimum execution time: 10_160_000 picoseconds. 
- Weight::from_parts(11_472_067, 18187) - // Standard Error: 10_730 - .saturating_add(Weight::from_parts(4_104_654, 0).saturating_mul(r.into())) + // Minimum execution time: 9_766_000 picoseconds. + Weight::from_parts(9_788_895, 18187) + // Standard Error: 11_913 + .saturating_add(Weight::from_parts(4_130_441, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -338,10 +336,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `863 + r * (108 ±0)` // Estimated: `19800 + r * (2676 ±0)` - // Minimum execution time: 49_741_000 picoseconds. - Weight::from_parts(53_544_421, 19800) - // Standard Error: 11_984 - .saturating_add(Weight::from_parts(5_123_946, 0).saturating_mul(r.into())) + // Minimum execution time: 48_992_000 picoseconds. + Weight::from_parts(55_524_560, 19800) + // Standard Error: 11_278 + .saturating_add(Weight::from_parts(4_987_109, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) @@ -357,10 +355,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `526 + r * (108 ±0)` // Estimated: `13530 + r * (2676 ±0)` - // Minimum execution time: 24_977_000 picoseconds. - Weight::from_parts(22_449_729, 13530) - // Standard Error: 10_846 - .saturating_add(Weight::from_parts(5_058_209, 0).saturating_mul(r.into())) + // Minimum execution time: 23_828_000 picoseconds. + Weight::from_parts(23_638_577, 13530) + // Standard Error: 10_946 + .saturating_add(Weight::from_parts(4_971_245, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -373,8 +371,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_700_000 picoseconds. - Weight::from_parts(3_028_000, 0) + // Minimum execution time: 2_759_000 picoseconds. + Weight::from_parts(2_850_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Democracy::VotingOf` (r:1 w:1) @@ -390,10 +388,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `596` // Estimated: `7260` - // Minimum execution time: 31_183_000 picoseconds. - Weight::from_parts(43_105_470, 7260) - // Standard Error: 3_096 - .saturating_add(Weight::from_parts(98_571, 0).saturating_mul(r.into())) + // Minimum execution time: 30_804_000 picoseconds. + Weight::from_parts(42_750_018, 7260) + // Standard Error: 3_300 + .saturating_add(Weight::from_parts(99_997, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -410,10 +408,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `597 + r * (22 ±0)` // Estimated: `7260` - // Minimum execution time: 39_672_000 picoseconds. - Weight::from_parts(44_120_387, 7260) - // Standard Error: 1_890 - .saturating_add(Weight::from_parts(130_089, 0).saturating_mul(r.into())) + // Minimum execution time: 39_946_000 picoseconds. 
+ Weight::from_parts(44_500_306, 7260) + // Standard Error: 1_914 + .saturating_add(Weight::from_parts(116_987, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -426,10 +424,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `761 + r * (26 ±0)` // Estimated: `7260` - // Minimum execution time: 21_396_000 picoseconds. - Weight::from_parts(26_151_983, 7260) - // Standard Error: 2_052 - .saturating_add(Weight::from_parts(131_709, 0).saturating_mul(r.into())) + // Minimum execution time: 21_677_000 picoseconds. + Weight::from_parts(25_329_290, 7260) + // Standard Error: 1_998 + .saturating_add(Weight::from_parts(157_800, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -442,10 +440,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `761 + r * (26 ±0)` // Estimated: `7260` - // Minimum execution time: 21_425_000 picoseconds. - Weight::from_parts(26_335_367, 7260) - // Standard Error: 2_170 - .saturating_add(Weight::from_parts(130_502, 0).saturating_mul(r.into())) + // Minimum execution time: 21_777_000 picoseconds. + Weight::from_parts(26_635_600, 7260) + // Standard Error: 2_697 + .saturating_add(Weight::from_parts(135_641, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -461,8 +459,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3556` - // Minimum execution time: 19_765_000 picoseconds. - Weight::from_parts(20_266_000, 3556) + // Minimum execution time: 19_914_000 picoseconds. + Weight::from_parts(20_450_000, 3556) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -474,8 +472,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `319` // Estimated: `3518` - // Minimum execution time: 16_560_000 picoseconds. - Weight::from_parts(17_277_000, 3518) + // Minimum execution time: 16_212_000 picoseconds. + Weight::from_parts(16_745_000, 3518) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -491,8 +489,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4883` // Estimated: `18187` - // Minimum execution time: 47_711_000 picoseconds. - Weight::from_parts(48_669_000, 18187) + // Minimum execution time: 47_225_000 picoseconds. + Weight::from_parts(47_976_000, 18187) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -504,8 +502,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4855` // Estimated: `18187` - // Minimum execution time: 43_809_000 picoseconds. - Weight::from_parts(45_698_000, 18187) + // Minimum execution time: 43_140_000 picoseconds. + Weight::from_parts(43_924_000, 18187) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -519,8 +517,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 14_736_000 picoseconds. - Weight::from_parts(15_191_000, 3556) + // Minimum execution time: 14_614_000 picoseconds. 
+ Weight::from_parts(15_376_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -532,8 +530,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `335` // Estimated: `3666` - // Minimum execution time: 22_803_000 picoseconds. - Weight::from_parts(23_732_000, 3666) + // Minimum execution time: 22_588_000 picoseconds. + Weight::from_parts(23_267_000, 3666) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -553,8 +551,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4834` // Estimated: `18187` - // Minimum execution time: 48_991_000 picoseconds. - Weight::from_parts(50_476_000, 18187) + // Minimum execution time: 49_681_000 picoseconds. + Weight::from_parts(51_578_000, 18187) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -564,8 +562,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3589` // Estimated: `6695` - // Minimum execution time: 43_129_000 picoseconds. - Weight::from_parts(45_076_000, 6695) + // Minimum execution time: 45_001_000 picoseconds. + Weight::from_parts(45_990_000, 6695) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -581,8 +579,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3503` // Estimated: `7260` - // Minimum execution time: 63_761_000 picoseconds. - Weight::from_parts(65_424_000, 7260) + // Minimum execution time: 65_095_000 picoseconds. + Weight::from_parts(67_484_000, 7260) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -598,8 +596,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3525` // Estimated: `7260` - // Minimum execution time: 66_543_000 picoseconds. - Weight::from_parts(69_537_000, 7260) + // Minimum execution time: 66_877_000 picoseconds. + Weight::from_parts(68_910_000, 7260) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -613,8 +611,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `399` // Estimated: `3666` - // Minimum execution time: 28_934_000 picoseconds. - Weight::from_parts(29_982_000, 3666) + // Minimum execution time: 29_312_000 picoseconds. + Weight::from_parts(30_040_000, 3666) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -636,8 +634,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `5943` // Estimated: `18187` - // Minimum execution time: 108_004_000 picoseconds. - Weight::from_parts(110_779_000, 18187) + // Minimum execution time: 107_932_000 picoseconds. + Weight::from_parts(108_940_000, 18187) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -649,8 +647,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3449` // Estimated: `6703` - // Minimum execution time: 17_630_000 picoseconds. - Weight::from_parts(18_419_000, 6703) + // Minimum execution time: 17_703_000 picoseconds. 
+ Weight::from_parts(18_188_000, 6703) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -660,8 +658,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_572_000 picoseconds. - Weight::from_parts(2_810_000, 0) + // Minimum execution time: 2_672_000 picoseconds. + Weight::from_parts(2_814_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Democracy::NextExternal` (r:0 w:1) @@ -670,8 +668,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_628_000 picoseconds. - Weight::from_parts(2_724_000, 0) + // Minimum execution time: 2_584_000 picoseconds. + Weight::from_parts(2_846_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Democracy::NextExternal` (r:1 w:1) @@ -686,8 +684,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `319` // Estimated: `3518` - // Minimum execution time: 24_624_000 picoseconds. - Weight::from_parts(25_518_000, 3518) + // Minimum execution time: 24_603_000 picoseconds. + Weight::from_parts(25_407_000, 3518) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -701,8 +699,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3552` // Estimated: `6703` - // Minimum execution time: 31_786_000 picoseconds. - Weight::from_parts(32_786_000, 6703) + // Minimum execution time: 31_721_000 picoseconds. + Weight::from_parts(32_785_000, 6703) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -718,8 +716,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `5854` // Estimated: `18187` - // Minimum execution time: 87_352_000 picoseconds. - Weight::from_parts(89_670_000, 18187) + // Minimum execution time: 86_981_000 picoseconds. + Weight::from_parts(89_140_000, 18187) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -731,8 +729,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `304` // Estimated: `3518` - // Minimum execution time: 17_561_000 picoseconds. - Weight::from_parts(18_345_000, 3518) + // Minimum execution time: 17_465_000 picoseconds. + Weight::from_parts(18_018_000, 3518) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -747,10 +745,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `277 + r * (86 ±0)` // Estimated: `1489 + r * (2676 ±0)` - // Minimum execution time: 6_801_000 picoseconds. - Weight::from_parts(8_524_132, 1489) - // Standard Error: 10_028 - .saturating_add(Weight::from_parts(4_073_619, 0).saturating_mul(r.into())) + // Minimum execution time: 6_746_000 picoseconds. + Weight::from_parts(7_381_932, 1489) + // Standard Error: 10_311 + .saturating_add(Weight::from_parts(4_107_935, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -773,10 +771,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `277 + r * (86 ±0)` // Estimated: `18187 + r * (2676 ±0)` - // Minimum execution time: 10_160_000 picoseconds. 
- Weight::from_parts(11_472_067, 18187) - // Standard Error: 10_730 - .saturating_add(Weight::from_parts(4_104_654, 0).saturating_mul(r.into())) + // Minimum execution time: 9_766_000 picoseconds. + Weight::from_parts(9_788_895, 18187) + // Standard Error: 11_913 + .saturating_add(Weight::from_parts(4_130_441, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -795,10 +793,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `863 + r * (108 ±0)` // Estimated: `19800 + r * (2676 ±0)` - // Minimum execution time: 49_741_000 picoseconds. - Weight::from_parts(53_544_421, 19800) - // Standard Error: 11_984 - .saturating_add(Weight::from_parts(5_123_946, 0).saturating_mul(r.into())) + // Minimum execution time: 48_992_000 picoseconds. + Weight::from_parts(55_524_560, 19800) + // Standard Error: 11_278 + .saturating_add(Weight::from_parts(4_987_109, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) @@ -814,10 +812,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `526 + r * (108 ±0)` // Estimated: `13530 + r * (2676 ±0)` - // Minimum execution time: 24_977_000 picoseconds. - Weight::from_parts(22_449_729, 13530) - // Standard Error: 10_846 - .saturating_add(Weight::from_parts(5_058_209, 0).saturating_mul(r.into())) + // Minimum execution time: 23_828_000 picoseconds. + Weight::from_parts(23_638_577, 13530) + // Standard Error: 10_946 + .saturating_add(Weight::from_parts(4_971_245, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -830,8 +828,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_700_000 picoseconds. - Weight::from_parts(3_028_000, 0) + // Minimum execution time: 2_759_000 picoseconds. + Weight::from_parts(2_850_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Democracy::VotingOf` (r:1 w:1) @@ -847,10 +845,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `596` // Estimated: `7260` - // Minimum execution time: 31_183_000 picoseconds. - Weight::from_parts(43_105_470, 7260) - // Standard Error: 3_096 - .saturating_add(Weight::from_parts(98_571, 0).saturating_mul(r.into())) + // Minimum execution time: 30_804_000 picoseconds. + Weight::from_parts(42_750_018, 7260) + // Standard Error: 3_300 + .saturating_add(Weight::from_parts(99_997, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -867,10 +865,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `597 + r * (22 ±0)` // Estimated: `7260` - // Minimum execution time: 39_672_000 picoseconds. - Weight::from_parts(44_120_387, 7260) - // Standard Error: 1_890 - .saturating_add(Weight::from_parts(130_089, 0).saturating_mul(r.into())) + // Minimum execution time: 39_946_000 picoseconds. 
+ Weight::from_parts(44_500_306, 7260) + // Standard Error: 1_914 + .saturating_add(Weight::from_parts(116_987, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -883,10 +881,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `761 + r * (26 ±0)` // Estimated: `7260` - // Minimum execution time: 21_396_000 picoseconds. - Weight::from_parts(26_151_983, 7260) - // Standard Error: 2_052 - .saturating_add(Weight::from_parts(131_709, 0).saturating_mul(r.into())) + // Minimum execution time: 21_677_000 picoseconds. + Weight::from_parts(25_329_290, 7260) + // Standard Error: 1_998 + .saturating_add(Weight::from_parts(157_800, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -899,10 +897,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `761 + r * (26 ±0)` // Estimated: `7260` - // Minimum execution time: 21_425_000 picoseconds. - Weight::from_parts(26_335_367, 7260) - // Standard Error: 2_170 - .saturating_add(Weight::from_parts(130_502, 0).saturating_mul(r.into())) + // Minimum execution time: 21_777_000 picoseconds. + Weight::from_parts(26_635_600, 7260) + // Standard Error: 2_697 + .saturating_add(Weight::from_parts(135_641, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -918,8 +916,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3556` - // Minimum execution time: 19_765_000 picoseconds. - Weight::from_parts(20_266_000, 3556) + // Minimum execution time: 19_914_000 picoseconds. + Weight::from_parts(20_450_000, 3556) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -931,8 +929,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `319` // Estimated: `3518` - // Minimum execution time: 16_560_000 picoseconds. - Weight::from_parts(17_277_000, 3518) + // Minimum execution time: 16_212_000 picoseconds. + Weight::from_parts(16_745_000, 3518) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -948,8 +946,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4883` // Estimated: `18187` - // Minimum execution time: 47_711_000 picoseconds. - Weight::from_parts(48_669_000, 18187) + // Minimum execution time: 47_225_000 picoseconds. + Weight::from_parts(47_976_000, 18187) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -961,8 +959,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4855` // Estimated: `18187` - // Minimum execution time: 43_809_000 picoseconds. - Weight::from_parts(45_698_000, 18187) + // Minimum execution time: 43_140_000 picoseconds. + Weight::from_parts(43_924_000, 18187) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -976,8 +974,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 14_736_000 picoseconds. - Weight::from_parts(15_191_000, 3556) + // Minimum execution time: 14_614_000 picoseconds. 
+ Weight::from_parts(15_376_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -989,8 +987,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `335` // Estimated: `3666` - // Minimum execution time: 22_803_000 picoseconds. - Weight::from_parts(23_732_000, 3666) + // Minimum execution time: 22_588_000 picoseconds. + Weight::from_parts(23_267_000, 3666) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/examples/authorization-tx-extension/src/tests.rs b/substrate/frame/examples/authorization-tx-extension/src/tests.rs index 8ca35c099556..5579e7a98416 100644 --- a/substrate/frame/examples/authorization-tx-extension/src/tests.rs +++ b/substrate/frame/examples/authorization-tx-extension/src/tests.rs @@ -24,7 +24,7 @@ use frame_support::{ pallet_prelude::{InvalidTransaction, TransactionValidityError}, }; use pallet_verify_signature::VerifySignature; -use sp_keyring::AccountKeyring; +use sp_keyring::Sr25519Keyring; use sp_runtime::{ generic::ExtensionVersion, traits::{Applyable, Checkable, IdentityLookup, TransactionExtension}, @@ -36,7 +36,7 @@ use crate::{extensions::AuthorizeCoownership, mock::*, pallet_assets}; #[test] fn create_asset_works() { new_test_ext().execute_with(|| { - let alice_keyring = AccountKeyring::Alice; + let alice_keyring = Sr25519Keyring::Alice; let alice_account = AccountId::from(alice_keyring.public()); // Simple call to create asset with Id `42`. let create_asset_call = @@ -99,9 +99,9 @@ fn create_asset_works() { #[test] fn create_coowned_asset_works() { new_test_ext().execute_with(|| { - let alice_keyring = AccountKeyring::Alice; - let bob_keyring = AccountKeyring::Bob; - let charlie_keyring = AccountKeyring::Charlie; + let alice_keyring = Sr25519Keyring::Alice; + let bob_keyring = Sr25519Keyring::Bob; + let charlie_keyring = Sr25519Keyring::Charlie; let alice_account = AccountId::from(alice_keyring.public()); let bob_account = AccountId::from(bob_keyring.public()); let charlie_account = AccountId::from(charlie_keyring.public()); @@ -189,9 +189,9 @@ fn create_coowned_asset_works() { #[test] fn inner_authorization_works() { new_test_ext().execute_with(|| { - let alice_keyring = AccountKeyring::Alice; - let bob_keyring = AccountKeyring::Bob; - let charlie_keyring = AccountKeyring::Charlie; + let alice_keyring = Sr25519Keyring::Alice; + let bob_keyring = Sr25519Keyring::Bob; + let charlie_keyring = Sr25519Keyring::Charlie; let charlie_account = AccountId::from(charlie_keyring.public()); // Simple call to create asset with Id `42`. let create_asset_call = diff --git a/substrate/frame/examples/default-config/src/lib.rs b/substrate/frame/examples/default-config/src/lib.rs index ccdcd4968598..f690bffe0998 100644 --- a/substrate/frame/examples/default-config/src/lib.rs +++ b/substrate/frame/examples/default-config/src/lib.rs @@ -62,10 +62,10 @@ pub mod pallet { type OverwrittenDefaultValue: Get; /// An input parameter that relies on `::AccountId`. This can - /// too have a default, as long as as it is present in `frame_system::DefaultConfig`. + /// too have a default, as long as it is present in `frame_system::DefaultConfig`. type CanDeriveDefaultFromSystem: Get; - /// We might chose to declare as one that doesn't have a default, for whatever semantical + /// We might choose to declare as one that doesn't have a default, for whatever semantical /// reason. 
 		#[pallet::no_default]
 		type HasNoDefault: Get<u32>;
diff --git a/substrate/frame/metadata-hash-extension/Cargo.toml b/substrate/frame/metadata-hash-extension/Cargo.toml
index bca2c3ffb198..8f4ba922984c 100644
--- a/substrate/frame/metadata-hash-extension/Cargo.toml
+++ b/substrate/frame/metadata-hash-extension/Cargo.toml
@@ -25,7 +25,7 @@ substrate-test-runtime-client = { workspace = true }
 sp-api = { workspace = true, default-features = true }
 sp-transaction-pool = { workspace = true, default-features = true }
 merkleized-metadata = { workspace = true }
-frame-metadata = { features = ["current"], workspace = true, default-features = true }
+frame-metadata = { features = ["current", "unstable"], workspace = true, default-features = true }
 sp-tracing = { workspace = true, default-features = true }
 
 [features]
diff --git a/substrate/frame/multisig/src/lib.rs b/substrate/frame/multisig/src/lib.rs
index 4a30b5c119b9..869b4adc2adc 100644
--- a/substrate/frame/multisig/src/lib.rs
+++ b/substrate/frame/multisig/src/lib.rs
@@ -77,6 +77,9 @@ macro_rules! log {
 type BalanceOf<T> =
 	<<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
 
+pub type BlockNumberFor<T> =
+	<<T as Config>::BlockNumberProvider as BlockNumberProvider>::BlockNumber;
+
 /// A global extrinsic index, formed as the extrinsic index within a block, together with that
 /// block's height. This allows a transaction in which a multisig operation of a particular
 /// composite was created to be uniquely identified.
@@ -153,6 +156,9 @@ pub mod pallet {
 
 		/// Weight information for extrinsics in this pallet.
 		type WeightInfo: weights::WeightInfo;
+
+		/// Provider for the block number. Normally this is the `frame_system` pallet.
+		type BlockNumberProvider: BlockNumberProvider;
 	}
 
 	/// The in-code storage version.
@@ -235,7 +241,7 @@ pub mod pallet {
 	}
 
 	#[pallet::hooks]
-	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {}
+	impl<T: Config> Hooks<frame_system::pallet_prelude::BlockNumberFor<T>> for Pallet<T> {}
 
 	#[pallet::call]
 	impl<T: Config> Pallet<T> {
@@ -626,7 +632,7 @@ impl<T: Config> Pallet<T> {
 	/// The current `Timepoint`.
 	pub fn timepoint() -> Timepoint<BlockNumberFor<T>> {
 		Timepoint {
-			height: <frame_system::Pallet<T>>::block_number(),
+			height: T::BlockNumberProvider::current_block_number(),
 			index: <frame_system::Pallet<T>>::extrinsic_index().unwrap_or_default(),
 		}
 	}
diff --git a/substrate/frame/multisig/src/tests.rs b/substrate/frame/multisig/src/tests.rs
index c5a98845270c..4065ce73f905 100644
--- a/substrate/frame/multisig/src/tests.rs
+++ b/substrate/frame/multisig/src/tests.rs
@@ -66,6 +66,7 @@ impl Config for Test {
 	type DepositFactor = ConstU64<1>;
 	type MaxSignatories = ConstU32<3>;
 	type WeightInfo = ();
+	type BlockNumberProvider = frame_system::Pallet<Test>;
 }
 
 use pallet_balances::Call as BalancesCall;
diff --git a/substrate/frame/nft-fractionalization/src/mock.rs b/substrate/frame/nft-fractionalization/src/mock.rs
index 50b41b5fc64e..762c1776e30f 100644
--- a/substrate/frame/nft-fractionalization/src/mock.rs
+++ b/substrate/frame/nft-fractionalization/src/mock.rs
@@ -115,6 +115,7 @@ impl pallet_nfts::Config for Test {
 	type OffchainSignature = Signature;
 	type OffchainPublic = AccountPublic;
 	type WeightInfo = ();
+	type BlockNumberProvider = frame_system::Pallet<Test>;
 	pallet_nfts::runtime_benchmarks_enabled!
{ type Helper = (); } diff --git a/substrate/frame/nfts/src/benchmarking.rs b/substrate/frame/nfts/src/benchmarking.rs index bc81096b459d..81828be5fa09 100644 --- a/substrate/frame/nfts/src/benchmarking.rs +++ b/substrate/frame/nfts/src/benchmarking.rs @@ -29,7 +29,7 @@ use frame_support::{ traits::{EnsureOrigin, Get, UnfilteredDispatchable}, BoundedVec, }; -use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin as SystemOrigin}; +use frame_system::RawOrigin as SystemOrigin; use sp_runtime::traits::{Bounded, One}; use crate::Pallet as Nfts; @@ -577,7 +577,7 @@ benchmarks_instance_pallet! { let (item, ..) = mint_item::(0); let delegate: T::AccountId = account("delegate", 0, SEED); let delegate_lookup = T::Lookup::unlookup(delegate.clone()); - let deadline = BlockNumberFor::::max_value(); + let deadline = BlockNumberFor::::max_value(); }: _(SystemOrigin::Signed(caller.clone()), collection, item, delegate_lookup, Some(deadline)) verify { assert_last_event::(Event::TransferApproved { collection, item, owner: caller, delegate, deadline: Some(deadline) }.into()); @@ -589,7 +589,7 @@ benchmarks_instance_pallet! { let delegate: T::AccountId = account("delegate", 0, SEED); let delegate_lookup = T::Lookup::unlookup(delegate.clone()); let origin = SystemOrigin::Signed(caller.clone()).into(); - let deadline = BlockNumberFor::::max_value(); + let deadline = BlockNumberFor::::max_value(); Nfts::::approve_transfer(origin, collection, item, delegate_lookup.clone(), Some(deadline))?; }: _(SystemOrigin::Signed(caller.clone()), collection, item, delegate_lookup) verify { @@ -602,7 +602,7 @@ benchmarks_instance_pallet! { let delegate: T::AccountId = account("delegate", 0, SEED); let delegate_lookup = T::Lookup::unlookup(delegate.clone()); let origin = SystemOrigin::Signed(caller.clone()).into(); - let deadline = BlockNumberFor::::max_value(); + let deadline = BlockNumberFor::::max_value(); Nfts::::approve_transfer(origin, collection, item, delegate_lookup.clone(), Some(deadline))?; }: _(SystemOrigin::Signed(caller.clone()), collection, item) verify { @@ -712,10 +712,10 @@ benchmarks_instance_pallet! { let price_direction = PriceDirection::Receive; let price_with_direction = PriceWithDirection { amount: price, direction: price_direction }; let duration = T::MaxDeadlineDuration::get(); - frame_system::Pallet::::set_block_number(One::one()); + T::BlockNumberProvider::set_block_number(One::one()); }: _(SystemOrigin::Signed(caller.clone()), collection, item1, collection, Some(item2), Some(price_with_direction.clone()), duration) verify { - let current_block = frame_system::Pallet::::block_number(); + let current_block = T::BlockNumberProvider::current_block_number(); assert_last_event::(Event::SwapCreated { offered_collection: collection, offered_item: item1, @@ -735,7 +735,7 @@ benchmarks_instance_pallet! { let duration = T::MaxDeadlineDuration::get(); let price_direction = PriceDirection::Receive; let price_with_direction = PriceWithDirection { amount: price, direction: price_direction }; - frame_system::Pallet::::set_block_number(One::one()); + T::BlockNumberProvider::set_block_number(One::one()); Nfts::::create_swap(origin, collection, item1, collection, Some(item2), Some(price_with_direction.clone()), duration)?; }: _(SystemOrigin::Signed(caller.clone()), collection, item1) verify { @@ -761,7 +761,7 @@ benchmarks_instance_pallet! 
{ let target_lookup = T::Lookup::unlookup(target.clone()); T::Currency::make_free_balance_be(&target, T::Currency::minimum_balance()); let origin = SystemOrigin::Signed(caller.clone()); - frame_system::Pallet::::set_block_number(One::one()); + T::BlockNumberProvider::set_block_number(One::one()); Nfts::::transfer(origin.clone().into(), collection, item2, target_lookup)?; Nfts::::create_swap( origin.clone().into(), @@ -774,7 +774,7 @@ benchmarks_instance_pallet! { )?; }: _(SystemOrigin::Signed(target.clone()), collection, item2, collection, item1, Some(price_with_direction.clone())) verify { - let current_block = frame_system::Pallet::::block_number(); + let current_block = T::BlockNumberProvider::current_block_number(); assert_last_event::(Event::SwapClaimed { sent_collection: collection, sent_item: item2, @@ -822,7 +822,7 @@ benchmarks_instance_pallet! { let target: T::AccountId = account("target", 0, SEED); T::Currency::make_free_balance_be(&target, DepositBalanceOf::::max_value()); - frame_system::Pallet::::set_block_number(One::one()); + T::BlockNumberProvider::set_block_number(One::one()); }: _(SystemOrigin::Signed(target.clone()), Box::new(mint_data), signature.into(), caller) verify { let metadata: BoundedVec<_, _> = metadata.try_into().unwrap(); @@ -865,7 +865,7 @@ benchmarks_instance_pallet! { let message = Encode::encode(&pre_signed_data); let signature = T::Helper::sign(&signer_public, &message); - frame_system::Pallet::::set_block_number(One::one()); + T::BlockNumberProvider::set_block_number(One::one()); }: _(SystemOrigin::Signed(item_owner.clone()), pre_signed_data, signature.into(), signer.clone()) verify { assert_last_event::( diff --git a/substrate/frame/nfts/src/features/approvals.rs b/substrate/frame/nfts/src/features/approvals.rs index 053fa67163b9..4738f69f83c4 100644 --- a/substrate/frame/nfts/src/features/approvals.rs +++ b/substrate/frame/nfts/src/features/approvals.rs @@ -46,7 +46,7 @@ impl, I: 'static> Pallet { collection: T::CollectionId, item: T::ItemId, delegate: T::AccountId, - maybe_deadline: Option>, + maybe_deadline: Option>, ) -> DispatchResult { ensure!( Self::is_pallet_feature_enabled(PalletFeature::Approvals), @@ -65,7 +65,7 @@ impl, I: 'static> Pallet { ensure!(check_origin == details.owner, Error::::NoPermission); } - let now = frame_system::Pallet::::block_number(); + let now = T::BlockNumberProvider::current_block_number(); let deadline = maybe_deadline.map(|d| d.saturating_add(now)); details @@ -111,7 +111,7 @@ impl, I: 'static> Pallet { let maybe_deadline = details.approvals.get(&delegate).ok_or(Error::::NotDelegate)?; let is_past_deadline = if let Some(deadline) = maybe_deadline { - let now = frame_system::Pallet::::block_number(); + let now = T::BlockNumberProvider::current_block_number(); now > *deadline } else { false diff --git a/substrate/frame/nfts/src/features/atomic_swap.rs b/substrate/frame/nfts/src/features/atomic_swap.rs index 830283b73c2a..03ebd35b81b2 100644 --- a/substrate/frame/nfts/src/features/atomic_swap.rs +++ b/substrate/frame/nfts/src/features/atomic_swap.rs @@ -53,7 +53,7 @@ impl, I: 'static> Pallet { desired_collection_id: T::CollectionId, maybe_desired_item_id: Option, maybe_price: Option>>, - duration: frame_system::pallet_prelude::BlockNumberFor, + duration: BlockNumberFor, ) -> DispatchResult { ensure!( Self::is_pallet_feature_enabled(PalletFeature::Swaps), @@ -76,7 +76,7 @@ impl, I: 'static> Pallet { ), }; - let now = frame_system::Pallet::::block_number(); + let now = T::BlockNumberProvider::current_block_number(); 
let deadline = duration.saturating_add(now); PendingSwapOf::::insert( @@ -119,7 +119,7 @@ impl, I: 'static> Pallet { let swap = PendingSwapOf::::get(&offered_collection_id, &offered_item_id) .ok_or(Error::::UnknownSwap)?; - let now = frame_system::Pallet::::block_number(); + let now = T::BlockNumberProvider::current_block_number(); if swap.deadline > now { let item = Item::::get(&offered_collection_id, &offered_item_id) .ok_or(Error::::UnknownItem)?; @@ -187,7 +187,7 @@ impl, I: 'static> Pallet { ensure!(desired_item == send_item_id, Error::::UnknownSwap); } - let now = frame_system::Pallet::::block_number(); + let now = T::BlockNumberProvider::current_block_number(); ensure!(now <= swap.deadline, Error::::DeadlineExpired); if let Some(ref price) = swap.price { diff --git a/substrate/frame/nfts/src/features/attributes.rs b/substrate/frame/nfts/src/features/attributes.rs index 28f7bd2c58ce..2cd09f7d2193 100644 --- a/substrate/frame/nfts/src/features/attributes.rs +++ b/substrate/frame/nfts/src/features/attributes.rs @@ -225,7 +225,7 @@ impl, I: 'static> Pallet { Error::::MaxAttributesLimitReached ); - let now = frame_system::Pallet::::block_number(); + let now = T::BlockNumberProvider::current_block_number(); ensure!(deadline >= now, Error::::DeadlineExpired); let item_details = diff --git a/substrate/frame/nfts/src/features/create_delete_item.rs b/substrate/frame/nfts/src/features/create_delete_item.rs index 37f64ae1b1b9..57366127f142 100644 --- a/substrate/frame/nfts/src/features/create_delete_item.rs +++ b/substrate/frame/nfts/src/features/create_delete_item.rs @@ -145,7 +145,7 @@ impl, I: 'static> Pallet { ensure!(account == mint_to, Error::::WrongOrigin); } - let now = frame_system::Pallet::::block_number(); + let now = T::BlockNumberProvider::current_block_number(); ensure!(deadline >= now, Error::::DeadlineExpired); ensure!( diff --git a/substrate/frame/nfts/src/features/settings.rs b/substrate/frame/nfts/src/features/settings.rs index d4f7533ffa4e..48719ae2c20e 100644 --- a/substrate/frame/nfts/src/features/settings.rs +++ b/substrate/frame/nfts/src/features/settings.rs @@ -96,11 +96,7 @@ impl, I: 'static> Pallet { pub(crate) fn do_update_mint_settings( maybe_check_origin: Option, collection: T::CollectionId, - mint_settings: MintSettings< - BalanceOf, - frame_system::pallet_prelude::BlockNumberFor, - T::CollectionId, - >, + mint_settings: MintSettings, BlockNumberFor, T::CollectionId>, ) -> DispatchResult { if let Some(check_origin) = &maybe_check_origin { ensure!( diff --git a/substrate/frame/nfts/src/lib.rs b/substrate/frame/nfts/src/lib.rs index 4e5493a3c755..346ad162c503 100644 --- a/substrate/frame/nfts/src/lib.rs +++ b/substrate/frame/nfts/src/lib.rs @@ -58,7 +58,7 @@ use frame_support::traits::{ }; use frame_system::Config as SystemConfig; use sp_runtime::{ - traits::{IdentifyAccount, Saturating, StaticLookup, Verify, Zero}, + traits::{BlockNumberProvider, IdentifyAccount, Saturating, StaticLookup, Verify, Zero}, RuntimeDebug, }; @@ -76,7 +76,7 @@ type AccountIdLookupOf = <::Lookup as StaticLookup>::Sourc pub mod pallet { use super::*; use frame_support::{pallet_prelude::*, traits::ExistenceRequirement}; - use frame_system::pallet_prelude::*; + use frame_system::{ensure_signed, pallet_prelude::OriginFor}; /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); @@ -210,7 +210,7 @@ pub mod pallet { /// The max duration in blocks for deadlines. 
#[pallet::constant] - type MaxDeadlineDuration: Get>; + type MaxDeadlineDuration: Get>; /// The max number of attributes a user could set per call. #[pallet::constant] @@ -242,6 +242,9 @@ pub mod pallet { /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; + + /// Provider for the block number. Normally this is the `frame_system` pallet. + type BlockNumberProvider: BlockNumberProvider; } /// Details of a collection. @@ -388,7 +391,7 @@ pub mod pallet { T::CollectionId, T::ItemId, PriceWithDirection>, - BlockNumberFor, + BlockNumberFor, >, OptionQuery, >; @@ -459,7 +462,7 @@ pub mod pallet { item: T::ItemId, owner: T::AccountId, delegate: T::AccountId, - deadline: Option>, + deadline: Option>, }, /// An approval for a `delegate` account to transfer the `item` of an item /// `collection` was cancelled by its `owner`. @@ -554,7 +557,7 @@ pub mod pallet { desired_collection: T::CollectionId, desired_item: Option, price: Option>>, - deadline: BlockNumberFor, + deadline: BlockNumberFor, }, /// The swap was cancelled. SwapCancelled { @@ -563,7 +566,7 @@ pub mod pallet { desired_collection: T::CollectionId, desired_item: Option, price: Option>>, - deadline: BlockNumberFor, + deadline: BlockNumberFor, }, /// The swap has been claimed. SwapClaimed { @@ -574,7 +577,7 @@ pub mod pallet { received_item: T::ItemId, received_item_owner: T::AccountId, price: Option>>, - deadline: BlockNumberFor, + deadline: BlockNumberFor, }, /// New attributes have been set for an `item` of the `collection`. PreSignedAttributesSet { @@ -857,7 +860,7 @@ pub mod pallet { item_config, |collection_details, collection_config| { let mint_settings = collection_config.mint_settings; - let now = frame_system::Pallet::::block_number(); + let now = T::BlockNumberProvider::current_block_number(); if let Some(start_block) = mint_settings.start_block { ensure!(start_block <= now, Error::::MintNotStarted); @@ -1029,7 +1032,7 @@ pub mod pallet { let deadline = details.approvals.get(&origin).ok_or(Error::::NoPermission)?; if let Some(d) = deadline { - let block_number = frame_system::Pallet::::block_number(); + let block_number = T::BlockNumberProvider::current_block_number(); ensure!(block_number <= *d, Error::::ApprovalExpired); } } @@ -1290,7 +1293,7 @@ pub mod pallet { collection: T::CollectionId, item: T::ItemId, delegate: AccountIdLookupOf, - maybe_deadline: Option>, + maybe_deadline: Option>, ) -> DispatchResult { let maybe_check_origin = T::ForceOrigin::try_origin(origin) .map(|_| None) @@ -1713,7 +1716,7 @@ pub mod pallet { pub fn update_mint_settings( origin: OriginFor, collection: T::CollectionId, - mint_settings: MintSettings, BlockNumberFor, T::CollectionId>, + mint_settings: MintSettings, BlockNumberFor, T::CollectionId>, ) -> DispatchResult { let maybe_check_origin = T::ForceOrigin::try_origin(origin) .map(|_| None) @@ -1809,7 +1812,7 @@ pub mod pallet { desired_collection: T::CollectionId, maybe_desired_item: Option, maybe_price: Option>>, - duration: BlockNumberFor, + duration: BlockNumberFor, ) -> DispatchResult { let origin = ensure_signed(origin)?; Self::do_create_swap( diff --git a/substrate/frame/nfts/src/mock.rs b/substrate/frame/nfts/src/mock.rs index 5b589f591ca3..291c3c081334 100644 --- a/substrate/frame/nfts/src/mock.rs +++ b/substrate/frame/nfts/src/mock.rs @@ -92,6 +92,7 @@ impl Config for Test { type WeightInfo = (); #[cfg(feature = "runtime-benchmarks")] type Helper = (); + type BlockNumberProvider = frame_system::Pallet; } pub(crate) fn new_test_ext() -> 
sp_io::TestExternalities { diff --git a/substrate/frame/nfts/src/types.rs b/substrate/frame/nfts/src/types.rs index d67fb404ea79..3ab85993473a 100644 --- a/substrate/frame/nfts/src/types.rs +++ b/substrate/frame/nfts/src/types.rs @@ -27,9 +27,11 @@ use frame_support::{ traits::Get, BoundedBTreeMap, BoundedBTreeSet, }; -use frame_system::pallet_prelude::BlockNumberFor; use scale_info::{build::Fields, meta_type, Path, Type, TypeInfo, TypeParameter}; +pub type BlockNumberFor = + <>::BlockNumberProvider as BlockNumberProvider>::BlockNumber; + /// A type alias for handling balance deposits. pub type DepositBalanceOf = <>::Currency as Currency<::AccountId>>::Balance; @@ -39,7 +41,7 @@ pub type CollectionDetailsFor = /// A type alias for keeping track of approvals used by a single item. pub type ApprovalsOf = BoundedBTreeMap< ::AccountId, - Option>, + Option>, >::ApprovalsLimit, >; /// A type alias for keeping track of approvals for an item's attributes. @@ -70,13 +72,13 @@ pub type ItemTipOf = ItemTip< >; /// A type alias for the settings configuration of a collection. pub type CollectionConfigFor = - CollectionConfig, BlockNumberFor, >::CollectionId>; + CollectionConfig, BlockNumberFor, >::CollectionId>; /// A type alias for the pre-signed minting configuration for a specified collection. pub type PreSignedMintOf = PreSignedMint< >::CollectionId, >::ItemId, ::AccountId, - BlockNumberFor, + BlockNumberFor, BalanceOf, >; /// A type alias for the pre-signed minting configuration on the attribute level of an item. @@ -84,7 +86,7 @@ pub type PreSignedAttributesOf = PreSignedAttributes< >::CollectionId, >::ItemId, ::AccountId, - BlockNumberFor, + BlockNumberFor, >; /// Information about a collection. diff --git a/substrate/frame/nomination-pools/runtime-api/src/lib.rs b/substrate/frame/nomination-pools/runtime-api/src/lib.rs index 4138dd22d898..644ee07fd634 100644 --- a/substrate/frame/nomination-pools/runtime-api/src/lib.rs +++ b/substrate/frame/nomination-pools/runtime-api/src/lib.rs @@ -43,6 +43,9 @@ sp_api::decl_runtime_apis! { fn pool_pending_slash(pool_id: PoolId) -> Balance; /// Returns the pending slash for a given pool member. + /// + /// If pending slash of the member exceeds `ExistentialDeposit`, it can be reported on + /// chain. fn member_pending_slash(member: AccountId) -> Balance; /// Returns true if the pool with `pool_id` needs migration. diff --git a/substrate/frame/nomination-pools/src/lib.rs b/substrate/frame/nomination-pools/src/lib.rs index 201b0af1d608..dc82bf3a37c6 100644 --- a/substrate/frame/nomination-pools/src/lib.rs +++ b/substrate/frame/nomination-pools/src/lib.rs @@ -1944,6 +1944,8 @@ pub mod pallet { NothingToAdjust, /// No slash pending that can be applied to the member. NothingToSlash, + /// The slash amount is too low to be applied. + SlashTooLow, /// The pool or member delegation has already migrated to delegate stake. AlreadyMigrated, /// The pool or member delegation has not migrated yet to delegate stake. @@ -2300,7 +2302,7 @@ pub mod pallet { let slash_weight = // apply slash if any before withdraw. - match Self::do_apply_slash(&member_account, None) { + match Self::do_apply_slash(&member_account, None, false) { Ok(_) => T::WeightInfo::apply_slash(), Err(e) => { let no_pending_slash: DispatchResult = Err(Error::::NothingToSlash.into()); @@ -2974,8 +2976,10 @@ pub mod pallet { /// Fails unless [`crate::pallet::Config::StakeAdapter`] is of strategy type: /// [`adapter::StakeStrategyType::Delegate`]. /// - /// This call can be dispatched permissionlessly (i.e. 
by any account). If the member has - /// slash to be applied, caller may be rewarded with the part of the slash. + /// The pending slash amount of the member must be equal or more than `ExistentialDeposit`. + /// This call can be dispatched permissionlessly (i.e. by any account). If the execution + /// is successful, fee is refunded and caller may be rewarded with a part of the slash + /// based on the [`crate::pallet::Config::StakeAdapter`] configuration. #[pallet::call_index(23)] #[pallet::weight(T::WeightInfo::apply_slash())] pub fn apply_slash( @@ -2989,7 +2993,7 @@ pub mod pallet { let who = ensure_signed(origin)?; let member_account = T::Lookup::lookup(member_account)?; - Self::do_apply_slash(&member_account, Some(who))?; + Self::do_apply_slash(&member_account, Some(who), true)?; // If successful, refund the fees. Ok(Pays::No.into()) @@ -3574,15 +3578,21 @@ impl Pallet { fn do_apply_slash( member_account: &T::AccountId, reporter: Option, + enforce_min_slash: bool, ) -> DispatchResult { let member = PoolMembers::::get(member_account).ok_or(Error::::PoolMemberNotFound)?; let pending_slash = Self::member_pending_slash(Member::from(member_account.clone()), member.clone())?; - // if nothing to slash, return error. + // ensure there is something to slash. ensure!(!pending_slash.is_zero(), Error::::NothingToSlash); + if enforce_min_slash { + // ensure slashed amount is at least the minimum balance. + ensure!(pending_slash >= T::Currency::minimum_balance(), Error::::SlashTooLow); + } + T::StakeAdapter::member_slash( Member::from(member_account.clone()), Pool::from(Pallet::::generate_bonded_account(member.pool_id)), @@ -3946,6 +3956,9 @@ impl Pallet { /// Returns the unapplied slash of a member. /// /// Pending slash is only applicable with [`adapter::DelegateStake`] strategy. + /// + /// If pending slash of the member exceeds `ExistentialDeposit`, it can be reported on + /// chain via [`Call::apply_slash`]. 
pub fn api_member_pending_slash(who: T::AccountId) -> BalanceOf { PoolMembers::::get(who.clone()) .map(|pool_member| { diff --git a/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml b/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml index 7940caaff775..70e1591409b8 100644 --- a/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml +++ b/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml @@ -26,7 +26,7 @@ sp-staking = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } frame-system = { workspace = true, default-features = true } -frame-support = { workspace = true, default-features = true } +frame-support = { features = ["experimental"], workspace = true, default-features = true } frame-election-provider-support = { workspace = true, default-features = true } pallet-timestamp = { workspace = true, default-features = true } diff --git a/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs b/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs index 40025cdbb3cd..cc6335959ab7 100644 --- a/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs +++ b/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs @@ -20,7 +20,7 @@ mod mock; use frame_support::{ - assert_noop, assert_ok, + assert_noop, assert_ok, hypothetically, traits::{fungible::InspectHold, Currency}, }; use mock::*; @@ -537,10 +537,10 @@ fn pool_slash_proportional() { // a typical example where 3 pool members unbond in era 99, 100, and 101, and a slash that // happened in era 100 should only affect the latter two. new_test_ext().execute_with(|| { - ExistentialDeposit::set(1); + ExistentialDeposit::set(2); BondingDuration::set(28); - assert_eq!(Balances::minimum_balance(), 1); - assert_eq!(CurrentEra::::get(), None); + assert_eq!(Balances::minimum_balance(), 2); + assert_eq!(Staking::current_era(), None); // create the pool, we know this has id 1. assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10)); @@ -670,6 +670,34 @@ fn pool_slash_proportional() { // no pending slash yet. assert_eq!(Pools::api_pool_pending_slash(1), 0); + // and therefore applying slash fails + assert_noop!( + Pools::apply_slash(RuntimeOrigin::signed(10), 21), + PoolsError::::NothingToSlash + ); + + hypothetically!({ + // a very small amount is slashed + pallet_staking::slashing::do_slash::( + &POOL1_BONDED, + 3, + &mut Default::default(), + &mut Default::default(), + 100, + ); + + // ensure correct amount is pending to be slashed + assert_eq!(Pools::api_pool_pending_slash(1), 3); + + // 21 has pending slash lower than ED (2) + assert_eq!(Pools::api_member_pending_slash(21), 1); + + // slash fails as minimum pending slash amount not met. 
+ assert_noop!( + Pools::apply_slash(RuntimeOrigin::signed(10), 21), + PoolsError::::SlashTooLow + ); + }); pallet_staking::slashing::do_slash::( &POOL1_BONDED, @@ -909,6 +937,7 @@ fn pool_slash_non_proportional_bonded_pool_and_chunks() { ); }); } + #[test] fn pool_migration_e2e() { new_test_ext().execute_with(|| { diff --git a/substrate/frame/offences/benchmarking/src/inner.rs b/substrate/frame/offences/benchmarking/src/inner.rs index 573114de0742..75f3e9931e34 100644 --- a/substrate/frame/offences/benchmarking/src/inner.rs +++ b/substrate/frame/offences/benchmarking/src/inner.rs @@ -19,7 +19,7 @@ use alloc::{vec, vec::Vec}; -use frame_benchmarking::v1::{account, benchmarks}; +use frame_benchmarking::v2::*; use frame_support::traits::Get; use frame_system::{Config as SystemConfig, Pallet as System, RawOrigin}; @@ -144,7 +144,7 @@ fn create_offender(n: u32, nominators: u32) -> Result, &' fn make_offenders( num_offenders: u32, num_nominators: u32, -) -> Result<(Vec>, Vec>), &'static str> { +) -> Result>, &'static str> { Staking::::new_session(0); let mut offenders = vec![]; @@ -167,21 +167,50 @@ fn make_offenders( .expect("failed to convert validator id to full identification") }) .collect::>>(); - Ok((id_tuples, offenders)) + Ok(id_tuples) } -benchmarks! { - where_clause { - where +#[cfg(test)] +fn assert_all_slashes_applied(offender_count: usize) +where + T: Config, + ::RuntimeEvent: TryInto>, + ::RuntimeEvent: TryInto>, + ::RuntimeEvent: TryInto, + ::RuntimeEvent: TryInto>, +{ + // make sure that all slashes have been applied + // (n nominators + one validator) * (slashed + unlocked) + deposit to reporter + + // reporter account endowed + some funds rescinded from issuance. + assert_eq!( + System::::read_events_for_pallet::>().len(), + 2 * (offender_count + 1) + 3 + ); + // (n nominators + one validator) * slashed + Slash Reported + assert_eq!( + System::::read_events_for_pallet::>().len(), + 1 * (offender_count + 1) + 1 + ); + // offence + assert_eq!(System::::read_events_for_pallet::().len(), 1); + // reporter new account + assert_eq!(System::::read_events_for_pallet::>().len(), 1); +} + +#[benchmarks( + where ::RuntimeEvent: TryInto>, ::RuntimeEvent: TryInto>, ::RuntimeEvent: TryInto, ::RuntimeEvent: TryInto>, - } - - report_offence_grandpa { - let n in 0 .. MAX_NOMINATORS.min(MaxNominationsOf::::get()); - +)] +mod benchmarks { + use super::*; + + #[benchmark] + pub fn report_offence_grandpa( + n: Linear<0, { MAX_NOMINATORS.min(MaxNominationsOf::::get()) }>, + ) -> Result<(), BenchmarkError> { // for grandpa equivocation reports the number of reporters // and offenders is always 1 let reporters = vec![account("reporter", 1, SEED)]; @@ -189,7 +218,7 @@ benchmarks! { // make sure reporters actually get rewarded Staking::::set_slash_reward_fraction(Perbill::one()); - let (mut offenders, raw_offenders) = make_offenders::(1, n)?; + let mut offenders = make_offenders::(1, n)?; let validator_set_count = Session::::validators().len() as u32; let offence = GrandpaEquivocationOffence { @@ -199,28 +228,24 @@ benchmarks! { offender: T::convert(offenders.pop().unwrap()), }; assert_eq!(System::::event_count(), 0); - }: { - let _ = Offences::::report_offence(reporters, offence); - } - verify { + + #[block] + { + let _ = Offences::::report_offence(reporters, offence); + } + #[cfg(test)] { - // make sure that all slashes have been applied - // (n nominators + one validator) * (slashed + unlocked) + deposit to reporter + reporter - // account endowed + some funds rescinded from issuance. 
- assert_eq!(System::::read_events_for_pallet::>().len(), 2 * (n + 1) as usize + 3); - // (n nominators + one validator) * slashed + Slash Reported - assert_eq!(System::::read_events_for_pallet::>().len(), 1 * (n + 1) as usize + 1); - // offence - assert_eq!(System::::read_events_for_pallet::().len(), 1); - // reporter new account - assert_eq!(System::::read_events_for_pallet::>().len(), 1); + assert_all_slashes_applied::(n as usize); } - } - report_offence_babe { - let n in 0 .. MAX_NOMINATORS.min(MaxNominationsOf::::get()); + Ok(()) + } + #[benchmark] + fn report_offence_babe( + n: Linear<0, { MAX_NOMINATORS.min(MaxNominationsOf::::get()) }>, + ) -> Result<(), BenchmarkError> { // for babe equivocation reports the number of reporters // and offenders is always 1 let reporters = vec![account("reporter", 1, SEED)]; @@ -228,7 +253,7 @@ benchmarks! { // make sure reporters actually get rewarded Staking::::set_slash_reward_fraction(Perbill::one()); - let (mut offenders, raw_offenders) = make_offenders::(1, n)?; + let mut offenders = make_offenders::(1, n)?; let validator_set_count = Session::::validators().len() as u32; let offence = BabeEquivocationOffence { @@ -238,23 +263,17 @@ benchmarks! { offender: T::convert(offenders.pop().unwrap()), }; assert_eq!(System::::event_count(), 0); - }: { - let _ = Offences::::report_offence(reporters, offence); - } - verify { + + #[block] + { + let _ = Offences::::report_offence(reporters, offence); + } #[cfg(test)] { - // make sure that all slashes have been applied - // (n nominators + one validator) * (slashed + unlocked) + deposit to reporter + reporter - // account endowed + some funds rescinded from issuance. - assert_eq!(System::::read_events_for_pallet::>().len(), 2 * (n + 1) as usize + 3); - // (n nominators + one validator) * slashed + Slash Reported - assert_eq!(System::::read_events_for_pallet::>().len(), 1 * (n + 1) as usize + 1); - // offence - assert_eq!(System::::read_events_for_pallet::().len(), 1); - // reporter new account - assert_eq!(System::::read_events_for_pallet::>().len(), 1); + assert_all_slashes_applied::(n as usize); } + + Ok(()) } impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); diff --git a/substrate/frame/offences/benchmarking/src/mock.rs b/substrate/frame/offences/benchmarking/src/mock.rs index efaec49a65b3..c5c178aa4443 100644 --- a/substrate/frame/offences/benchmarking/src/mock.rs +++ b/substrate/frame/offences/benchmarking/src/mock.rs @@ -29,7 +29,7 @@ use frame_system as system; use pallet_session::historical as pallet_session_historical; use sp_runtime::{ testing::{Header, UintAuthorityId}, - BuildStorage, Perbill, + BuildStorage, KeyTypeId, Perbill, }; type AccountId = u64; @@ -66,7 +66,8 @@ sp_runtime::impl_opaque_keys! { pub struct TestSessionHandler; impl pallet_session::SessionHandler for TestSessionHandler { - const KEY_TYPE_IDS: &'static [sp_runtime::KeyTypeId] = &[]; + // corresponds to the opaque key id above + const KEY_TYPE_IDS: &'static [KeyTypeId] = &[KeyTypeId([100u8, 117u8, 109u8, 121u8])]; fn on_genesis_session(_validators: &[(AccountId, Ks)]) {} diff --git a/substrate/frame/preimage/src/benchmarking.rs b/substrate/frame/preimage/src/benchmarking.rs index 3d0c5b900579..ea635bf3ef77 100644 --- a/substrate/frame/preimage/src/benchmarking.rs +++ b/substrate/frame/preimage/src/benchmarking.rs @@ -17,14 +17,13 @@ //! Preimage pallet benchmarking. 
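The hunks below migrate the preimage benchmarks from the v1 `benchmarks!` macro to the attribute-style v2 API (`frame_benchmarking::v2`), the same migration applied to the offences benchmarks above. For orientation, a minimal sketch of the v2 shape — the call name `my_call`, the `1024` upper bound, and the final assertion are placeholders for this illustration, not code from this PR:

#[benchmarks]
mod benchmarks {
	use super::*;

	// One benchmark function per extrinsic; `s` is a linear component swept over 0..=1024.
	#[benchmark]
	fn my_call(s: Linear<0, 1024>) {
		let caller: T::AccountId = whitelisted_caller();

		// The call being measured; `_` resolves to the enclosing function's name.
		#[extrinsic_call]
		_(RawOrigin::Signed(caller), s);

		// Post-conditions go here, replacing the v1 `verify` block (placeholder check).
		assert!(Pallet::<T>::some_invariant_holds());
	}

	impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test);
}

Every `note_*`, `request_*`, and `unrequest_*` benchmark in the hunks that follow is rewritten into this shape.
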
-use super::*; use alloc::vec; -use frame_benchmarking::v1::{account, benchmarks, whitelisted_caller, BenchmarkError}; +use frame_benchmarking::v2::*; use frame_support::assert_ok; use frame_system::RawOrigin; use sp_runtime::traits::Bounded; -use crate::Pallet as Preimage; +use crate::*; fn funded_account() -> T::AccountId { let caller: T::AccountId = whitelisted_caller(); @@ -43,206 +42,225 @@ fn sized_preimage_and_hash(size: u32) -> (Vec, T::Hash) { (preimage, hash) } -benchmarks! { +fn insert_old_unrequested(s: u32) -> ::Hash { + let acc = account("old", s, 0); + T::Currency::make_free_balance_be(&acc, BalanceOf::::max_value() / 2u32.into()); + + // The preimage size does not matter here as it is not touched. + let preimage = s.to_le_bytes(); + let hash = ::Hashing::hash(&preimage[..]); + + #[allow(deprecated)] + StatusFor::::insert( + &hash, + OldRequestStatus::Unrequested { deposit: (acc, 123u32.into()), len: preimage.len() as u32 }, + ); + hash +} + +#[benchmarks] +mod benchmarks { + use super::*; + // Expensive note - will reserve. - note_preimage { - let s in 0 .. MAX_SIZE; + #[benchmark] + fn note_preimage(s: Linear<0, MAX_SIZE>) { let caller = funded_account::(); let (preimage, hash) = sized_preimage_and_hash::(s); - }: _(RawOrigin::Signed(caller), preimage) - verify { - assert!(Preimage::::have_preimage(&hash)); + + #[extrinsic_call] + _(RawOrigin::Signed(caller), preimage); + + assert!(Pallet::::have_preimage(&hash)); } + // Cheap note - will not reserve since it was requested. - note_requested_preimage { - let s in 0 .. MAX_SIZE; + #[benchmark] + fn note_requested_preimage(s: Linear<0, MAX_SIZE>) { let caller = funded_account::(); let (preimage, hash) = sized_preimage_and_hash::(s); - assert_ok!(Preimage::::request_preimage( + assert_ok!(Pallet::::request_preimage( T::ManagerOrigin::try_successful_origin() .expect("ManagerOrigin has no successful origin required for the benchmark"), hash, )); - }: note_preimage(RawOrigin::Signed(caller), preimage) - verify { - assert!(Preimage::::have_preimage(&hash)); + + #[extrinsic_call] + note_preimage(RawOrigin::Signed(caller), preimage); + + assert!(Pallet::::have_preimage(&hash)); } + // Cheap note - will not reserve since it's the manager. - note_no_deposit_preimage { - let s in 0 .. MAX_SIZE; + #[benchmark] + fn note_no_deposit_preimage(s: Linear<0, MAX_SIZE>) { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (preimage, hash) = sized_preimage_and_hash::(s); - assert_ok!(Preimage::::request_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - hash, - )); - }: note_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - preimage - ) verify { - assert!(Preimage::::have_preimage(&hash)); + assert_ok!(Pallet::::request_preimage(o.clone(), hash,)); + + #[extrinsic_call] + note_preimage(o as T::RuntimeOrigin, preimage); + + assert!(Pallet::::have_preimage(&hash)); } // Expensive unnote - will unreserve. 
-	unnote_preimage {
+	#[benchmark]
+	fn unnote_preimage() {
 		let caller = funded_account::<T>();
 		let (preimage, hash) = preimage_and_hash::<T>();
-		assert_ok!(Preimage::<T>::note_preimage(RawOrigin::Signed(caller.clone()).into(), preimage));
-	}: _(RawOrigin::Signed(caller), hash)
-	verify {
-		assert!(!Preimage::<T>::have_preimage(&hash));
+		assert_ok!(Pallet::<T>::note_preimage(RawOrigin::Signed(caller.clone()).into(), preimage));
+
+		#[extrinsic_call]
+		_(RawOrigin::Signed(caller), hash);
+
+		assert!(!Pallet::<T>::have_preimage(&hash));
 	}
+
 	// Cheap unnote - will not unreserve since there's no deposit held.
-	unnote_no_deposit_preimage {
+	#[benchmark]
+	fn unnote_no_deposit_preimage() {
+		let o = T::ManagerOrigin::try_successful_origin()
+			.expect("ManagerOrigin has no successful origin required for the benchmark");
 		let (preimage, hash) = preimage_and_hash::<T>();
-		assert_ok!(Preimage::<T>::note_preimage(
-			T::ManagerOrigin::try_successful_origin()
-				.expect("ManagerOrigin has no successful origin required for the benchmark"),
-			preimage,
-		));
-	}: unnote_preimage(
-		T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?,
-		hash
-	) verify {
-		assert!(!Preimage::<T>::have_preimage(&hash));
+		assert_ok!(Pallet::<T>::note_preimage(o.clone(), preimage,));
+
+		#[extrinsic_call]
+		unnote_preimage(o as T::RuntimeOrigin, hash);
+
+		assert!(!Pallet::<T>::have_preimage(&hash));
 	}

 	// Expensive request - will unreserve the noter's deposit.
-	request_preimage {
+	#[benchmark]
+	fn request_preimage() {
+		let o = T::ManagerOrigin::try_successful_origin()
+			.expect("ManagerOrigin has no successful origin required for the benchmark");
 		let (preimage, hash) = preimage_and_hash::<T>();
 		let noter = funded_account::<T>();
-		assert_ok!(Preimage::<T>::note_preimage(RawOrigin::Signed(noter.clone()).into(), preimage));
-	}: _(
-		T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?,
-		hash
-	) verify {
-		let ticket = TicketOf::<T>::new(&noter, Footprint { count: 1, size: MAX_SIZE as u64 }).unwrap();
-		let s = RequestStatus::Requested { maybe_ticket: Some((noter, ticket)), count: 1, maybe_len: Some(MAX_SIZE) };
+		assert_ok!(Pallet::<T>::note_preimage(RawOrigin::Signed(noter.clone()).into(), preimage));
+
+		#[extrinsic_call]
+		_(o as T::RuntimeOrigin, hash);
+
+		let ticket =
+			TicketOf::<T>::new(&noter, Footprint { count: 1, size: MAX_SIZE as u64 }).unwrap();
+		let s = RequestStatus::Requested {
+			maybe_ticket: Some((noter, ticket)),
+			count: 1,
+			maybe_len: Some(MAX_SIZE),
+		};
 		assert_eq!(RequestStatusFor::<T>::get(&hash), Some(s));
 	}
+
 	// Cheap request - would unreserve the deposit but none was held.
-	request_no_deposit_preimage {
+	#[benchmark]
+	fn request_no_deposit_preimage() {
+		let o = T::ManagerOrigin::try_successful_origin()
+			.expect("ManagerOrigin has no successful origin required for the benchmark");
 		let (preimage, hash) = preimage_and_hash::<T>();
-		assert_ok!(Preimage::<T>::note_preimage(
-			T::ManagerOrigin::try_successful_origin()
-				.expect("ManagerOrigin has no successful origin required for the benchmark"),
-			preimage,
-		));
-	}: request_preimage(
-		T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?,
-		hash
-	) verify {
-		let s = RequestStatus::Requested { maybe_ticket: None, count: 2, maybe_len: Some(MAX_SIZE) };
+		assert_ok!(Pallet::<T>::note_preimage(o.clone(), preimage,));
+
+		#[extrinsic_call]
+		request_preimage(o as T::RuntimeOrigin, hash);
+
+		let s =
+			RequestStatus::Requested { maybe_ticket: None, count: 2, maybe_len: Some(MAX_SIZE) };
 		assert_eq!(RequestStatusFor::<T>::get(&hash), Some(s));
 	}
+
 	// Cheap request - the preimage is not yet noted, so no deposit to unreserve.
-	request_unnoted_preimage {
+	#[benchmark]
+	fn request_unnoted_preimage() {
+		let o = T::ManagerOrigin::try_successful_origin()
+			.expect("ManagerOrigin has no successful origin required for the benchmark");
 		let (_, hash) = preimage_and_hash::<T>();
-	}: request_preimage(
-		T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?,
-		hash
-	) verify {
+
+		#[extrinsic_call]
+		request_preimage(o as T::RuntimeOrigin, hash);
+
 		let s = RequestStatus::Requested { maybe_ticket: None, count: 1, maybe_len: None };
 		assert_eq!(RequestStatusFor::<T>::get(&hash), Some(s));
 	}
+
 	// Cheap request - the preimage is already requested, so just a counter bump.
-	request_requested_preimage {
+	#[benchmark]
+	fn request_requested_preimage() {
+		let o = T::ManagerOrigin::try_successful_origin()
+			.expect("ManagerOrigin has no successful origin required for the benchmark");
 		let (_, hash) = preimage_and_hash::<T>();
-		assert_ok!(Preimage::<T>::request_preimage(
-			T::ManagerOrigin::try_successful_origin()
-				.expect("ManagerOrigin has no successful origin required for the benchmark"),
-			hash,
-		));
-	}: request_preimage(
-		T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?,
-		hash
-	) verify {
+		assert_ok!(Pallet::<T>::request_preimage(o.clone(), hash,));
+
+		#[extrinsic_call]
+		request_preimage(o as T::RuntimeOrigin, hash);
+
 		let s = RequestStatus::Requested { maybe_ticket: None, count: 2, maybe_len: None };
 		assert_eq!(RequestStatusFor::<T>::get(&hash), Some(s));
 	}

 	// Expensive unrequest - last reference and it's noted, so will destroy the preimage.
-	unrequest_preimage {
+	#[benchmark]
+	fn unrequest_preimage() {
+		let o = T::ManagerOrigin::try_successful_origin()
+			.expect("ManagerOrigin has no successful origin required for the benchmark");
 		let (preimage, hash) = preimage_and_hash::<T>();
-		assert_ok!(Preimage::<T>::request_preimage(
-			T::ManagerOrigin::try_successful_origin()
-				.expect("ManagerOrigin has no successful origin required for the benchmark"),
-			hash,
-		));
-		assert_ok!(Preimage::<T>::note_preimage(
-			T::ManagerOrigin::try_successful_origin()
-				.expect("ManagerOrigin has no successful origin required for the benchmark"),
-			preimage,
-		));
-	}: _(
-		T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?,
-		hash
-	) verify {
+		assert_ok!(Pallet::<T>::request_preimage(o.clone(), hash,));
+		assert_ok!(Pallet::<T>::note_preimage(o.clone(), preimage));
+
+		#[extrinsic_call]
+		_(o as T::RuntimeOrigin, hash);
+
 		assert_eq!(RequestStatusFor::<T>::get(&hash), None);
 	}
+
 	// Cheap unrequest - last reference, but it's not noted.
-	unrequest_unnoted_preimage {
+	#[benchmark]
+	fn unrequest_unnoted_preimage() {
+		let o = T::ManagerOrigin::try_successful_origin()
+			.expect("ManagerOrigin has no successful origin required for the benchmark");
 		let (_, hash) = preimage_and_hash::<T>();
-		assert_ok!(Preimage::<T>::request_preimage(
-			T::ManagerOrigin::try_successful_origin()
-				.expect("ManagerOrigin has no successful origin required for the benchmark"),
-			hash,
-		));
-	}: unrequest_preimage(
-		T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?,
-		hash
-	) verify {
+		assert_ok!(Pallet::<T>::request_preimage(o.clone(), hash,));
+
+		#[extrinsic_call]
+		unrequest_preimage(o as T::RuntimeOrigin, hash);
+
 		assert_eq!(RequestStatusFor::<T>::get(&hash), None);
 	}
+
 	// Cheap unrequest - not the last reference.
-	unrequest_multi_referenced_preimage {
+	#[benchmark]
+	fn unrequest_multi_referenced_preimage() {
+		let o = T::ManagerOrigin::try_successful_origin()
+			.expect("ManagerOrigin has no successful origin required for the benchmark");
 		let (_, hash) = preimage_and_hash::<T>();
-		assert_ok!(Preimage::<T>::request_preimage(
-			T::ManagerOrigin::try_successful_origin()
-				.expect("ManagerOrigin has no successful origin required for the benchmark"),
-			hash,
-		));
-		assert_ok!(Preimage::<T>::request_preimage(
-			T::ManagerOrigin::try_successful_origin()
-				.expect("ManagerOrigin has no successful origin required for the benchmark"),
-			hash,
-		));
-	}: unrequest_preimage(
-		T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?,
-		hash
-	) verify {
+		assert_ok!(Pallet::<T>::request_preimage(o.clone(), hash,));
+		assert_ok!(Pallet::<T>::request_preimage(o.clone(), hash,));
+
+		#[extrinsic_call]
+		unrequest_preimage(o as T::RuntimeOrigin, hash);
+
 		let s = RequestStatus::Requested { maybe_ticket: None, count: 1, maybe_len: None };
 		assert_eq!(RequestStatusFor::<T>::get(&hash), Some(s));
 	}

-	ensure_updated {
-		let n in 1..MAX_HASH_UPGRADE_BULK_COUNT;
-
+	#[benchmark]
+	fn ensure_updated(n: Linear<1, MAX_HASH_UPGRADE_BULK_COUNT>) {
 		let caller = funded_account::<T>();
 		let hashes = (0..n).map(|i| insert_old_unrequested::<T>(i)).collect::<Vec<_>>();
-	}: _(RawOrigin::Signed(caller), hashes)
-	verify {
+
+		#[extrinsic_call]
+		_(RawOrigin::Signed(caller), hashes);
+
 		assert_eq!(RequestStatusFor::<T>::iter_keys().count(), n as usize);
 		#[allow(deprecated)]
 		let c = StatusFor::<T>::iter_keys().count();
 		assert_eq!(c, 0);
 	}

-	impl_benchmark_test_suite!(Preimage, crate::mock::new_test_ext(), crate::mock::Test);
-}
-
-fn insert_old_unrequested<T: Config>(s: u32) -> <T as frame_system::Config>::Hash {
-	let acc = account("old", s, 0);
-	T::Currency::make_free_balance_be(&acc, BalanceOf::<T>::max_value() / 2u32.into());
-
-	// The preimage size does not matter here as it is not touched.
-	let preimage = s.to_le_bytes();
-	let hash = <T as frame_system::Config>::Hashing::hash(&preimage[..]);
-
-	#[allow(deprecated)]
-	StatusFor::<T>::insert(
-		&hash,
-		OldRequestStatus::Unrequested { deposit: (acc, 123u32.into()), len: preimage.len() as u32 },
-	);
-	hash
+	impl_benchmark_test_suite! {
+		Pallet,
+		mock::new_test_ext(),
+		mock::Test
+	}
 }
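The hunks above migrate pallet-preimage from the legacy `benchmarks!` macro to the attribute-based benchmarking v2 syntax. For orientation, a minimal sketch of the v2 shape (hypothetical pallet; `do_something` and `Something` are placeholder names, not code from this patch):

```rust
// Sketch only: the general layout frame-benchmarking v2 expects.
#[benchmarks]
mod benchmarks {
	use super::*;

	#[benchmark]
	fn do_something() {
		// 1. Setup code runs before the measured call.
		let caller: T::AccountId = whitelisted_caller();

		// 2. Exactly one #[extrinsic_call] (or #[block]) marks what is measured;
		//    `_` expands to the extrinsic named after the benchmark function.
		#[extrinsic_call]
		_(RawOrigin::Signed(caller));

		// 3. Everything after the call replaces the old `verify` block.
		assert!(Something::<T>::get().is_some());
	}

	impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test);
}
```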
diff --git a/substrate/frame/preimage/src/weights.rs b/substrate/frame/preimage/src/weights.rs
index edb2eed9c75a..a3aec7e7546e 100644
--- a/substrate/frame/preimage/src/weights.rs
+++ b/substrate/frame/preimage/src/weights.rs
@@ -18,27 +18,25 @@
 //! Autogenerated weights for `pallet_preimage`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-11-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
 //! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024`

 // Executed Command:
-// ./target/production/substrate-node
+// target/production/substrate-node
 // benchmark
 // pallet
-// --chain=dev
 // --steps=50
 // --repeat=20
-// --pallet=pallet_preimage
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
 // --extrinsic=*
 // --wasm-execution=compiled
 // --heap-pages=4096
-// --output=./substrate/frame/preimage/src/weights.rs
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=pallet_preimage
+// --chain=dev
 // --header=./substrate/HEADER-APACHE2
+// --output=./substrate/frame/preimage/src/weights.rs
 // --template=./substrate/.maintain/frame-weight-template.hbs

 #![cfg_attr(rustfmt, rustfmt_skip)]
@@ -84,10 +82,10 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `7`
 		//  Estimated: `6012`
-		// Minimum execution time: 51_981_000 picoseconds.
-		Weight::from_parts(52_228_000, 6012)
-			// Standard Error: 6
-			.saturating_add(Weight::from_parts(2_392, 0).saturating_mul(s.into()))
+		// Minimum execution time: 51_305_000 picoseconds.
+		Weight::from_parts(51_670_000, 6012)
+			// Standard Error: 5
+			.saturating_add(Weight::from_parts(2_337, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(5_u64))
 			.saturating_add(T::DbWeight::get().writes(3_u64))
 	}
@@ -102,10 +100,10 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `68`
 		//  Estimated: `3556`
-		// Minimum execution time: 15_835_000 picoseconds.
-		Weight::from_parts(16_429_000, 3556)
-			// Standard Error: 8
-			.saturating_add(Weight::from_parts(2_647, 0).saturating_mul(s.into()))
+		// Minimum execution time: 16_204_000 picoseconds.
+		Weight::from_parts(16_613_000, 3556)
+			// Standard Error: 6
+			.saturating_add(Weight::from_parts(2_503, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(2_u64))
 			.saturating_add(T::DbWeight::get().writes(2_u64))
 	}
@@ -120,10 +118,10 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `68`
 		//  Estimated: `3556`
-		// Minimum execution time: 15_263_000 picoseconds.
-		Weight::from_parts(15_578_000, 3556)
-			// Standard Error: 7
-			.saturating_add(Weight::from_parts(2_598, 0).saturating_mul(s.into()))
+		// Minimum execution time: 15_118_000 picoseconds.
+		Weight::from_parts(15_412_000, 3556)
+			// Standard Error: 6
+			.saturating_add(Weight::from_parts(2_411, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(2_u64))
 			.saturating_add(T::DbWeight::get().writes(2_u64))
 	}
@@ -139,8 +137,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `206`
 		//  Estimated: `3820`
-		// Minimum execution time: 64_189_000 picoseconds.
-		Weight::from_parts(70_371_000, 3820)
+		// Minimum execution time: 57_218_000 picoseconds.
+		Weight::from_parts(61_242_000, 3820)
 			.saturating_add(T::DbWeight::get().reads(3_u64))
 			.saturating_add(T::DbWeight::get().writes(3_u64))
 	}
@@ -154,8 +152,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `106`
 		//  Estimated: `3556`
-		// Minimum execution time: 27_582_000 picoseconds.
-		Weight::from_parts(31_256_000, 3556)
+		// Minimum execution time: 25_140_000 picoseconds.
+ Weight::from_parts(27_682_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -167,8 +165,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `150` // Estimated: `3556` - // Minimum execution time: 27_667_000 picoseconds. - Weight::from_parts(32_088_000, 3556) + // Minimum execution time: 25_296_000 picoseconds. + Weight::from_parts(27_413_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -180,8 +178,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 16_065_000 picoseconds. - Weight::from_parts(20_550_000, 3556) + // Minimum execution time: 15_011_000 picoseconds. + Weight::from_parts(16_524_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -193,8 +191,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3556` - // Minimum execution time: 13_638_000 picoseconds. - Weight::from_parts(16_979_000, 3556) + // Minimum execution time: 14_649_000 picoseconds. + Weight::from_parts(15_439_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -206,8 +204,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 11_383_000 picoseconds. - Weight::from_parts(12_154_000, 3556) + // Minimum execution time: 10_914_000 picoseconds. + Weight::from_parts(11_137_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -221,8 +219,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 22_832_000 picoseconds. - Weight::from_parts(30_716_000, 3556) + // Minimum execution time: 22_512_000 picoseconds. + Weight::from_parts(24_376_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -234,8 +232,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 10_685_000 picoseconds. - Weight::from_parts(12_129_000, 3556) + // Minimum execution time: 10_571_000 picoseconds. + Weight::from_parts(10_855_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -247,8 +245,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 10_394_000 picoseconds. - Weight::from_parts(10_951_000, 3556) + // Minimum execution time: 10_312_000 picoseconds. + Weight::from_parts(10_653_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -267,10 +265,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0 + n * (227 ±0)` // Estimated: `6012 + n * (2830 ±0)` - // Minimum execution time: 62_203_000 picoseconds. - Weight::from_parts(63_735_000, 6012) - // Standard Error: 59_589 - .saturating_add(Weight::from_parts(59_482_352, 0).saturating_mul(n.into())) + // Minimum execution time: 61_990_000 picoseconds. 
+ Weight::from_parts(62_751_000, 6012) + // Standard Error: 44_079 + .saturating_add(Weight::from_parts(57_343_378, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((4_u64).saturating_mul(n.into()))) @@ -295,10 +293,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `7` // Estimated: `6012` - // Minimum execution time: 51_981_000 picoseconds. - Weight::from_parts(52_228_000, 6012) - // Standard Error: 6 - .saturating_add(Weight::from_parts(2_392, 0).saturating_mul(s.into())) + // Minimum execution time: 51_305_000 picoseconds. + Weight::from_parts(51_670_000, 6012) + // Standard Error: 5 + .saturating_add(Weight::from_parts(2_337, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -313,10 +311,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 15_835_000 picoseconds. - Weight::from_parts(16_429_000, 3556) - // Standard Error: 8 - .saturating_add(Weight::from_parts(2_647, 0).saturating_mul(s.into())) + // Minimum execution time: 16_204_000 picoseconds. + Weight::from_parts(16_613_000, 3556) + // Standard Error: 6 + .saturating_add(Weight::from_parts(2_503, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -331,10 +329,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 15_263_000 picoseconds. - Weight::from_parts(15_578_000, 3556) - // Standard Error: 7 - .saturating_add(Weight::from_parts(2_598, 0).saturating_mul(s.into())) + // Minimum execution time: 15_118_000 picoseconds. + Weight::from_parts(15_412_000, 3556) + // Standard Error: 6 + .saturating_add(Weight::from_parts(2_411, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -350,8 +348,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `206` // Estimated: `3820` - // Minimum execution time: 64_189_000 picoseconds. - Weight::from_parts(70_371_000, 3820) + // Minimum execution time: 57_218_000 picoseconds. + Weight::from_parts(61_242_000, 3820) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -365,8 +363,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 27_582_000 picoseconds. - Weight::from_parts(31_256_000, 3556) + // Minimum execution time: 25_140_000 picoseconds. + Weight::from_parts(27_682_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -378,8 +376,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `150` // Estimated: `3556` - // Minimum execution time: 27_667_000 picoseconds. - Weight::from_parts(32_088_000, 3556) + // Minimum execution time: 25_296_000 picoseconds. + Weight::from_parts(27_413_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -391,8 +389,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 16_065_000 picoseconds. 
- Weight::from_parts(20_550_000, 3556) + // Minimum execution time: 15_011_000 picoseconds. + Weight::from_parts(16_524_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -404,8 +402,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3556` - // Minimum execution time: 13_638_000 picoseconds. - Weight::from_parts(16_979_000, 3556) + // Minimum execution time: 14_649_000 picoseconds. + Weight::from_parts(15_439_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -417,8 +415,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 11_383_000 picoseconds. - Weight::from_parts(12_154_000, 3556) + // Minimum execution time: 10_914_000 picoseconds. + Weight::from_parts(11_137_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -432,8 +430,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 22_832_000 picoseconds. - Weight::from_parts(30_716_000, 3556) + // Minimum execution time: 22_512_000 picoseconds. + Weight::from_parts(24_376_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -445,8 +443,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 10_685_000 picoseconds. - Weight::from_parts(12_129_000, 3556) + // Minimum execution time: 10_571_000 picoseconds. + Weight::from_parts(10_855_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -458,8 +456,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 10_394_000 picoseconds. - Weight::from_parts(10_951_000, 3556) + // Minimum execution time: 10_312_000 picoseconds. + Weight::from_parts(10_653_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -478,10 +476,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0 + n * (227 ±0)` // Estimated: `6012 + n * (2830 ±0)` - // Minimum execution time: 62_203_000 picoseconds. - Weight::from_parts(63_735_000, 6012) - // Standard Error: 59_589 - .saturating_add(Weight::from_parts(59_482_352, 0).saturating_mul(n.into())) + // Minimum execution time: 61_990_000 picoseconds. 
+		Weight::from_parts(62_751_000, 6012)
+			// Standard Error: 44_079
+			.saturating_add(Weight::from_parts(57_343_378, 0).saturating_mul(n.into()))
 			.saturating_add(RocksDbWeight::get().reads(2_u64))
 			.saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(n.into())))
 			.saturating_add(RocksDbWeight::get().writes((4_u64).saturating_mul(n.into())))
diff --git a/substrate/frame/proxy/src/benchmarking.rs b/substrate/frame/proxy/src/benchmarking.rs
index eebb506bf374..b72f53af8e72 100644
--- a/substrate/frame/proxy/src/benchmarking.rs
+++ b/substrate/frame/proxy/src/benchmarking.rs
@@ -22,7 +22,9 @@
 use super::*;
 use crate::Pallet as Proxy;
 use alloc::{boxed::Box, vec};
-use frame::benchmarking::prelude::*;
+use frame::benchmarking::prelude::{
+	account, benchmarks, impl_test_function, whitelisted_caller, BenchmarkError, RawOrigin,
+};

 const SEED: u32 = 0;
@@ -317,7 +319,7 @@
 			BlockNumberFor::<T>::zero(),
 			0,
 		)?;
-		let height = frame_system::Pallet::<T>::block_number();
+		let height = T::BlockNumberProvider::current_block_number();
 		let ext_index = frame_system::Pallet::<T>::extrinsic_index().unwrap_or(0);
 		let pure_account = Pallet::<T>::pure_account(&caller, &T::ProxyType::default(), 0, None);
diff --git a/substrate/frame/proxy/src/lib.rs b/substrate/frame/proxy/src/lib.rs
index cc8aeedcc5f9..cc21db7469b2 100644
--- a/substrate/frame/proxy/src/lib.rs
+++ b/substrate/frame/proxy/src/lib.rs
@@ -47,6 +47,9 @@
 type CallHashOf<T> = <<T as Config>::CallHasher as Hash>::Output;

 type BalanceOf<T> = <<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;

+pub type BlockNumberFor<T> =
+	<<T as Config>::BlockNumberProvider as BlockNumberProvider>::BlockNumber;
+
 type AccountIdLookupOf<T> = <<T as Config>::Lookup as StaticLookup>::Source;

 /// The parameters under which a particular account has a proxy relationship with some other
@@ -163,6 +166,9 @@
 		/// into a pre-existing storage value.
 		#[pallet::constant]
 		type AnnouncementDepositFactor: Get<BalanceOf<Self>>;
+
+		/// Provider for the block number. Normally this is the `frame_system` pallet.
+		type BlockNumberProvider: BlockNumberProvider;
 	}

 	#[pallet::call]
@@ -379,7 +385,7 @@
 			let announcement = Announcement {
 				real: real.clone(),
 				call_hash,
-				height: frame_system::Pallet::<T>::block_number(),
+				height: T::BlockNumberProvider::current_block_number(),
 			};

 			Announcements::<T>::try_mutate(&who, |(ref mut pending, ref mut deposit)| {
@@ -490,7 +496,7 @@
 			let def = Self::find_proxy(&real, &delegate, force_proxy_type)?;
 			let call_hash = T::CallHasher::hash_of(&call);

-			let now = frame_system::Pallet::<T>::block_number();
+			let now = T::BlockNumberProvider::current_block_number();
 			Self::edit_announcements(&delegate, |ann| {
 				ann.real != real ||
 					ann.call_hash != call_hash ||
@@ -626,7 +632,7 @@ impl<T: Config> Pallet<T> {
 	) -> T::AccountId {
 		let (height, ext_index) = maybe_when.unwrap_or_else(|| {
 			(
-				frame_system::Pallet::<T>::block_number(),
+				T::BlockNumberProvider::current_block_number(),
 				frame_system::Pallet::<T>::extrinsic_index().unwrap_or_default(),
 			)
 		});
diff --git a/substrate/frame/proxy/src/tests.rs b/substrate/frame/proxy/src/tests.rs
index 5baf9bb9e838..afc668188e6c 100644
--- a/substrate/frame/proxy/src/tests.rs
+++ b/substrate/frame/proxy/src/tests.rs
@@ -119,6 +119,7 @@ impl Config for Test {
 	type MaxPending = ConstU32<2>;
 	type AnnouncementDepositBase = ConstU64<1>;
 	type AnnouncementDepositFactor = ConstU64<1>;
+	type BlockNumberProvider = frame_system::Pallet<Test>;
 }

 use super::{Call as ProxyCall, Event as ProxyEvent};
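With the new `BlockNumberProvider` associated type, announcement heights no longer have to come from `frame_system`. The mock wiring above keeps the old behaviour; a parachain could instead drive it off the relay chain clock, roughly like this (hypothetical runtime wiring, not part of this patch):

```rust
// Sketch: pointing pallet-proxy at the relay chain block number on a parachain.
// `Runtime` is a placeholder; `RelaychainDataProvider` is cumulus'
// `BlockNumberProvider` implementation over the relay chain state proof.
impl pallet_proxy::Config for Runtime {
	// ...all other associated types unchanged...
	type BlockNumberProvider = cumulus_pallet_parachain_system::RelaychainDataProvider<Runtime>;
}
```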
diff --git a/substrate/frame/revive/Cargo.toml b/substrate/frame/revive/Cargo.toml
index 81fbbc8cf38e..e61554f5cfa0 100644
--- a/substrate/frame/revive/Cargo.toml
+++ b/substrate/frame/revive/Cargo.toml
@@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 environmental = { workspace = true }
 paste = { workspace = true }
-polkavm = { version = "0.13.0", default-features = false }
+polkavm = { version = "0.17.0", default-features = false }
 bitflags = { workspace = true }
 codec = { features = ["derive", "max-encoded-len"], workspace = true }
 scale-info = { features = ["derive"], workspace = true }
@@ -32,17 +32,15 @@
 impl-trait-for-tuples = { workspace = true }
 rlp = { workspace = true }
 derive_more = { workspace = true }
 hex = { workspace = true }
-jsonrpsee = { workspace = true, features = ["full"], optional = true }
 ethereum-types = { workspace = true, features = ["codec", "rlp", "serialize"] }

 # Polkadot SDK Dependencies
 frame-benchmarking = { optional = true, workspace = true }
 frame-support = { workspace = true }
 frame-system = { workspace = true }
-pallet-balances = { optional = true, workspace = true }
-pallet-revive-fixtures = { workspace = true, default-features = false, optional = true }
-pallet-revive-uapi = { workspace = true, default-features = true }
-pallet-revive-proc-macro = { workspace = true, default-features = true }
+pallet-revive-fixtures = { workspace = true, optional = true }
+pallet-revive-uapi = { workspace = true, features = ["scale"] }
+pallet-revive-proc-macro = { workspace = true }
 pallet-transaction-payment = { workspace = true }
 sp-api = { workspace = true }
 sp-arithmetic = { workspace = true }
@@ -61,7 +59,6 @@ subxt-signer = { workspace = true, optional = true, features = [
 array-bytes = { workspace = true, default-features = true }
 assert_matches = { workspace = true }
 pretty_assertions = { workspace = true }
-pallet-revive-fixtures = { workspace = true, default-features = true }
 secp256k1 = { workspace = true, features = ["recovery"] }
 serde_json = { workspace = true }
 hex-literal = { workspace = true }
@@ -69,10 +66,9 @@ hex-literal = { workspace = true }

 # Polkadot SDK Dependencies
 pallet-balances = { workspace = true, default-features = true }
 pallet-timestamp = { workspace = true, default-features = true }
-pallet-message-queue = { workspace = true, default-features = true }
 pallet-utility = { workspace = true, default-features = true }
-pallet-assets = { workspace = true, default-features = true }
 pallet-proxy = { workspace = true, default-features = true }
+pallet-revive-fixtures = { workspace = true, default-features = true }
 sp-keystore = { workspace = true, default-features = true }
 sp-tracing = { workspace = true, default-features = true }
 xcm-builder = { workspace = true, default-features = true }
@@ -87,9 +83,7 @@ std = [
 	"frame-support/std",
 	"frame-system/std",
 	"hex/std",
-	"jsonrpsee",
 	"log/std",
-	"pallet-balances?/std",
 	"pallet-proxy/std",
 	"pallet-revive-fixtures?/std",
 	"pallet-timestamp/std",
@@ -117,9 +111,7 @@ runtime-benchmarks = [
 	"frame-benchmarking/runtime-benchmarks",
 	"frame-support/runtime-benchmarks",
 	"frame-system/runtime-benchmarks",
-	"pallet-assets/runtime-benchmarks",
 	"pallet-balances/runtime-benchmarks",
-	"pallet-message-queue/runtime-benchmarks",
 	"pallet-proxy/runtime-benchmarks",
 	"pallet-revive-fixtures",
 	"pallet-timestamp/runtime-benchmarks",
@@ -127,13 +119,12 @@
 	"pallet-utility/runtime-benchmarks",
 	"sp-runtime/runtime-benchmarks",
 	"xcm-builder/runtime-benchmarks",
+	"xcm/runtime-benchmarks",
 ]
 try-runtime = [
 	"frame-support/try-runtime",
 	"frame-system/try-runtime",
-	"pallet-assets/try-runtime",
 	"pallet-balances/try-runtime",
-	"pallet-message-queue/try-runtime",
 	"pallet-proxy/try-runtime",
 	"pallet-timestamp/try-runtime",
 	"pallet-transaction-payment/try-runtime",
diff --git a/substrate/frame/revive/README.md b/substrate/frame/revive/README.md
index 5352e636c252..575920dfaac7 100644
--- a/substrate/frame/revive/README.md
+++ b/substrate/frame/revive/README.md
@@ -92,7 +92,7 @@ Driven by the desire to have an iterative approach in developing new contract interfaces this pallet contains the
 concept of an unstable interface. Akin to the rust nightly compiler it allows us to add new interfaces but mark them
 as unstable so that contract languages can experiment with them and give feedback before we stabilize those.

-In order to access interfaces which don't have a stable `#[api_version(x)]` in [`runtime.rs`](src/wasm/runtime.rs)
+In order to access interfaces which are not yet marked as `#[stable]` in [`runtime.rs`](src/wasm/runtime.rs)
 one needs to set `pallet_revive::Config::UnsafeUnstableInterface` to `ConstBool<true>`.
 **It should be obvious that any production runtime should never be compiled with this feature: In addition to being
 subject to change or removal those interfaces might not have proper weights associated with them and are therefore
 considered unsafe**.
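Concretely, opting into the unstable interface is a single associated type in the pallet config. A sketch for a local dev runtime only (`Runtime` is an illustrative name, assuming `UnsafeUnstableInterface` is the `Get<bool>` described above):

```rust
use frame_support::traits::ConstBool;

impl pallet_revive::Config for Runtime {
	// ...all other associated types elided...
	// Never ship this on a production chain: unstable host functions can
	// change, disappear, or lack final benchmarked weights.
	type UnsafeUnstableInterface = ConstBool<true>;
}
```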
diff --git a/substrate/frame/revive/fixtures/Cargo.toml b/substrate/frame/revive/fixtures/Cargo.toml
index 7a5452853d65..88921cca08ec 100644
--- a/substrate/frame/revive/fixtures/Cargo.toml
+++ b/substrate/frame/revive/fixtures/Cargo.toml
@@ -5,6 +5,11 @@ authors.workspace = true
 edition.workspace = true
 license.workspace = true
 description = "Fixtures for testing and benchmarking"
+homepage.workspace = true
+repository.workspace = true
+
+[package.metadata.polkadot-sdk]
+exclude-from-umbrella = true

 [lints]
 workspace = true
@@ -18,10 +23,8 @@ anyhow = { workspace = true, default-features = true, optional = true }
 log = { workspace = true }

 [build-dependencies]
-parity-wasm = { workspace = true }
-tempfile = { workspace = true }
 toml = { workspace = true }
-polkavm-linker = { version = "0.14.0" }
+polkavm-linker = { version = "0.17.0" }
 anyhow = { workspace = true, default-features = true }

 [features]
diff --git a/substrate/frame/revive/fixtures/build.rs b/substrate/frame/revive/fixtures/build.rs
index 3472e0846efd..eca547bc6ddd 100644
--- a/substrate/frame/revive/fixtures/build.rs
+++ b/substrate/frame/revive/fixtures/build.rs
@@ -20,7 +20,8 @@ use anyhow::Result;
 use anyhow::{bail, Context};
 use std::{
-	cfg, env, fs,
+	env, fs,
+	io::Write,
 	path::{Path, PathBuf},
 	process::Command,
 };
@@ -82,7 +83,7 @@ fn create_cargo_toml<'a>(
 	entries: impl Iterator<Item = &'a Entry>,
 	output_dir: &Path,
 ) -> Result<()> {
-	let mut cargo_toml: toml::Value = toml::from_str(include_str!("./build/Cargo.toml"))?;
+	let mut cargo_toml: toml::Value = toml::from_str(include_str!("./build/_Cargo.toml"))?;
 	let mut set_dep = |name, path| -> Result<()> {
 		cargo_toml["dependencies"][name]["path"] = toml::Value::String(
 			fixtures_dir.join(path).canonicalize()?.to_str().unwrap().to_string(),
@@ -108,21 +109,24 @@ fn create_cargo_toml<'a>(
 	let cargo_toml = toml::to_string_pretty(&cargo_toml)?;
 	fs::write(output_dir.join("Cargo.toml"), cargo_toml.clone())
 		.with_context(|| format!("Failed to write {cargo_toml:?}"))?;
+	fs::copy(
+		fixtures_dir.join("build/_rust-toolchain.toml"),
+		output_dir.join("rust-toolchain.toml"),
+	)
+	.context("Failed to write toolchain file")?;
 	Ok(())
 }

-fn invoke_build(target: &Path, current_dir: &Path) -> Result<()> {
+fn invoke_build(current_dir: &Path) -> Result<()> {
 	let encoded_rustflags = ["-Dwarnings"].join("\x1f");

-	let mut build_command = Command::new(env::var("CARGO")?);
+	let mut build_command = Command::new("cargo");
 	build_command
 		.current_dir(current_dir)
 		.env_clear()
 		.env("PATH", env::var("PATH").unwrap_or_default())
 		.env("CARGO_ENCODED_RUSTFLAGS", encoded_rustflags)
-		.env("RUSTC_BOOTSTRAP", "1")
 		.env("RUSTUP_HOME", env::var("RUSTUP_HOME").unwrap_or_default())
-		.env("RUSTUP_TOOLCHAIN", env::var("RUSTUP_TOOLCHAIN").unwrap_or_default())
 		.args([
 			"build",
 			"--release",
@@ -130,7 +134,7 @@ fn invoke_build(current_dir: &Path) -> Result<()> {
 			"-Zbuild-std-features=panic_immediate_abort",
 		])
 		.arg("--target")
-		.arg(target);
+		.arg(polkavm_linker::target_json_64_path().unwrap());

 	if let Ok(toolchain) = env::var(OVERRIDE_RUSTUP_TOOLCHAIN_ENV_VAR) {
 		build_command.env("RUSTUP_TOOLCHAIN", &toolchain);
@@ -168,7 +172,7 @@ fn write_output(build_dir: &Path, out_dir: &Path, entries: Vec<Entry>) -> Result<()> {
 	for entry in entries {
 		post_process(
 			&build_dir
-				.join("target/riscv32emac-unknown-none-polkavm/release")
+				.join("target/riscv64emac-unknown-none-polkavm/release")
 				.join(entry.name()),
 			&out_dir.join(entry.out_filename()),
 		)?;
@@ -177,11 +181,66 @@ fn write_output(build_dir: &Path, out_dir: &Path, entries: Vec<Entry>) -> Result<()> {
 	Ok(())
 }

+/// Create a directory in the `target` as output directory
+fn create_out_dir() -> Result<PathBuf> {
+	let temp_dir: PathBuf = env::var("OUT_DIR")?.into();
+
+	// this is set in case the user has overridden the target directory
+	let out_dir = if let Ok(path) = env::var("CARGO_TARGET_DIR") {
+		path.into()
+	} else {
+		// otherwise just traverse up from the out dir
+		let mut out_dir: PathBuf = temp_dir.clone();
+		loop {
+			if !out_dir.pop() {
+				bail!("Cannot find project root.")
+			}
+			if out_dir.join("Cargo.lock").exists() {
+				break;
+			}
+		}
+		out_dir.join("target")
+	}
+	.join("pallet-revive-fixtures");
+
+	// clean up some leftover symlink from previous versions of this script
+	let mut out_exists = out_dir.exists();
+	if out_exists && !out_dir.is_dir() {
+		fs::remove_file(&out_dir)?;
+		out_exists = false;
+	}
+
+	if !out_exists {
+		fs::create_dir(&out_dir).context("Failed to create output directory")?;
+	}
+
+	// write the location of the out dir so it can be found later
+	let mut file = fs::File::create(temp_dir.join("fixture_location.rs"))
+		.context("Failed to create fixture_location.rs")?;
+	write!(
+		file,
+		r#"
+		#[allow(dead_code)]
+		const FIXTURE_DIR: &str = "{0}";
+		macro_rules! fixture {{
+			($name: literal) => {{
+				include_bytes!(concat!("{0}", "/", $name, ".polkavm"))
+			}};
+		}}
+		"#,
+		out_dir.display()
+	)
+	.context("Failed to write to fixture_location.rs")?;
+
+	Ok(out_dir)
+}
+
 pub fn main() -> Result<()> {
 	let fixtures_dir: PathBuf = env::var("CARGO_MANIFEST_DIR")?.into();
 	let contracts_dir = fixtures_dir.join("contracts");
-	let out_dir: PathBuf = env::var("OUT_DIR")?.into();
-	let target = fixtures_dir.join("riscv32emac-unknown-none-polkavm.json");
+	let out_dir = create_out_dir().context("Cannot determine output directory")?;
+	let build_dir = out_dir.join("build");
+	fs::create_dir_all(&build_dir).context("Failed to create build directory")?;

 	println!("cargo::rerun-if-env-changed={OVERRIDE_RUSTUP_TOOLCHAIN_ENV_VAR}");
 	println!("cargo::rerun-if-env-changed={OVERRIDE_STRIP_ENV_VAR}");
@@ -199,25 +258,9 @@ pub fn main() -> Result<()> {
 		return Ok(())
 	}

-	let tmp_dir = tempfile::tempdir()?;
-	let tmp_dir_path = tmp_dir.path();
-
-	create_cargo_toml(&fixtures_dir, entries.iter(), tmp_dir.path())?;
-	invoke_build(&target, tmp_dir_path)?;
-
-	write_output(tmp_dir_path, &out_dir, entries)?;
-
-	#[cfg(unix)]
-	if let Ok(symlink_dir) = env::var("CARGO_WORKSPACE_ROOT_DIR") {
-		let symlink_dir: PathBuf = symlink_dir.into();
-		let symlink_dir: PathBuf = symlink_dir.join("target").join("pallet-revive-fixtures");
-		if symlink_dir.is_symlink() {
-			fs::remove_file(&symlink_dir)
-				.with_context(|| format!("Failed to remove_file {symlink_dir:?}"))?;
-		}
-		std::os::unix::fs::symlink(&out_dir, &symlink_dir)
-			.with_context(|| format!("Failed to symlink {out_dir:?} -> {symlink_dir:?}"))?;
-	}
+	create_cargo_toml(&fixtures_dir, entries.iter(), &build_dir)?;
+	invoke_build(&build_dir)?;
+	write_output(&build_dir, &out_dir, entries)?;

 	Ok(())
 }
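Rather than relying on `OUT_DIR` in downstream crates, the build script now emits a small `fixture_location.rs` that pins the fixtures directory. A sketch of how such a generated file is consumed (it mirrors the `src/lib.rs` hunk further down in this diff; `dummy` is one of the existing fixtures):

```rust
// Sketch: consuming the file written by create_out_dir() above.
// `FIXTURE_DIR` and `fixture!` are defined by the generated file itself.
include!(concat!(env!("OUT_DIR"), "/fixture_location.rs"));

// Embed a compiled fixture at build time...
pub const DUMMY: &[u8] = fixture!("dummy");

// ...or resolve one on disk at test time.
fn fixture_path(name: &str) -> std::path::PathBuf {
	std::path::Path::new(FIXTURE_DIR).join(format!("{name}.polkavm"))
}
```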
diff --git a/substrate/frame/revive/fixtures/build/Cargo.toml b/substrate/frame/revive/fixtures/build/_Cargo.toml
similarity index 62%
rename from substrate/frame/revive/fixtures/build/Cargo.toml
rename to substrate/frame/revive/fixtures/build/_Cargo.toml
index 5d0e256e2e73..8dc38e14c14b 100644
--- a/substrate/frame/revive/fixtures/build/Cargo.toml
+++ b/substrate/frame/revive/fixtures/build/_Cargo.toml
@@ -4,14 +4,17 @@ publish = false
 version = "1.0.0"
 edition = "2021"

+# Make sure this is not included into the workspace
+[workspace]
+
 # Binary targets are injected dynamically by the build script.
 [[bin]]

 # All paths are injected dynamically by the build script.
 [dependencies]
-uapi = { package = 'pallet-revive-uapi', path = "", default-features = false }
+uapi = { package = 'pallet-revive-uapi', path = "", features = ["unstable-api"], default-features = false }
 common = { package = 'pallet-revive-fixtures-common', path = "" }
-polkavm-derive = { version = "0.14.0" }
+polkavm-derive = { version = "0.17.0" }

 [profile.release]
 opt-level = 3
diff --git a/substrate/frame/revive/fixtures/build/_rust-toolchain.toml b/substrate/frame/revive/fixtures/build/_rust-toolchain.toml
new file mode 100644
index 000000000000..4c757c708d58
--- /dev/null
+++ b/substrate/frame/revive/fixtures/build/_rust-toolchain.toml
@@ -0,0 +1,4 @@
+[toolchain]
+channel = "nightly-2024-11-19"
+components = ["rust-src"]
+profile = "minimal"
diff --git a/substrate/frame/revive/fixtures/contracts/call_data_load.rs b/substrate/frame/revive/fixtures/contracts/call_data_load.rs
new file mode 100644
index 000000000000..d3df9433f5d1
--- /dev/null
+++ b/substrate/frame/revive/fixtures/contracts/call_data_load.rs
@@ -0,0 +1,44 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! This uses the call data load API to read the first input byte.
+//! This single input byte is used as the offset for a second call
+//! to the call data load API.
+//! The output of the second API call is returned.
+
+#![no_std]
+#![no_main]
+
+extern crate common;
+use uapi::{HostFn, HostFnImpl as api, ReturnFlags};
+
+#[no_mangle]
+#[polkavm_derive::polkavm_export]
+pub extern "C" fn deploy() {}
+
+#[no_mangle]
+#[polkavm_derive::polkavm_export]
+pub extern "C" fn call() {
+	let mut buf = [0; 32];
+	api::call_data_load(&mut buf, 0);
+
+	let offset = buf[31] as u32;
+	let mut buf = [0; 32];
+	api::call_data_load(&mut buf, offset);
+
+	api::return_value(ReturnFlags::empty(), &buf);
+}
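`call_data_load` mirrors the EVM's `CALLDATALOAD`: it copies a 32-byte word starting at a byte offset and zero-fills anything past the end of the call data, which is why the fixture can safely index with an arbitrary offset byte. A plain-Rust illustration of that semantics (inferred from the fixture's usage; not SDK code):

```rust
// Illustration only: CALLDATALOAD-style read of a 32-byte word at `offset`,
// zero-padded past the end of the call data.
fn call_data_load_semantics(call_data: &[u8], offset: usize) -> [u8; 32] {
	let mut out = [0u8; 32];
	if offset < call_data.len() {
		let n = core::cmp::min(32, call_data.len() - offset);
		out[..n].copy_from_slice(&call_data[offset..offset + n]);
	}
	out
}
```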
diff --git a/substrate/frame/revive/fixtures/contracts/call_data_size.rs b/substrate/frame/revive/fixtures/contracts/call_data_size.rs
new file mode 100644
index 000000000000..32205b921d47
--- /dev/null
+++ b/substrate/frame/revive/fixtures/contracts/call_data_size.rs
@@ -0,0 +1,37 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Returns the call data size back to the caller.
+
+#![no_std]
+#![no_main]
+
+extern crate common;
+use uapi::{HostFn, HostFnImpl as api, ReturnFlags};
+
+#[no_mangle]
+#[polkavm_derive::polkavm_export]
+pub extern "C" fn deploy() {}
+
+#[no_mangle]
+#[polkavm_derive::polkavm_export]
+pub extern "C" fn call() {
+	let mut buf = [0; 32];
+	api::call_data_size(&mut buf);
+
+	api::return_value(ReturnFlags::empty(), &buf);
+}
diff --git a/substrate/frame/revive/fixtures/contracts/caller_contract.rs b/substrate/frame/revive/fixtures/contracts/caller_contract.rs
index f9a30b87df47..edad43fae251 100644
--- a/substrate/frame/revive/fixtures/contracts/caller_contract.rs
+++ b/substrate/frame/revive/fixtures/contracts/caller_contract.rs
@@ -65,7 +65,7 @@ pub extern "C" fn call() {
 		None,
 		Some(&salt),
 	);
-	assert!(matches!(res, Err(ReturnErrorCode::CalleeTrapped)));
+	assert!(matches!(res, Err(ReturnErrorCode::OutOfResources)));

 	// Fail to deploy the contract due to insufficient proof_size weight.
 	let res = api::instantiate(
@@ -79,7 +79,7 @@ pub extern "C" fn call() {
 		None,
 		Some(&salt),
 	);
-	assert!(matches!(res, Err(ReturnErrorCode::CalleeTrapped)));
+	assert!(matches!(res, Err(ReturnErrorCode::OutOfResources)));

 	// Deploy the contract successfully.
 	let mut callee = [0u8; 20];
@@ -121,7 +121,7 @@ pub extern "C" fn call() {
 		&input,
 		None,
 	);
-	assert!(matches!(res, Err(ReturnErrorCode::CalleeTrapped)));
+	assert!(matches!(res, Err(ReturnErrorCode::OutOfResources)));

 	// Fail to call the contract due to insufficient proof_size weight.
 	let res = api::call(
@@ -134,7 +134,7 @@ pub extern "C" fn call() {
 		&input,
 		None,
 	);
-	assert!(matches!(res, Err(ReturnErrorCode::CalleeTrapped)));
+	assert!(matches!(res, Err(ReturnErrorCode::OutOfResources)));

 	// Call the contract successfully.
 	let mut output = [0u8; 4];
diff --git a/substrate/frame/revive/fixtures/contracts/create_storage_and_call.rs b/substrate/frame/revive/fixtures/contracts/create_storage_and_call.rs
index 4fa2db0c8c1c..a12c36af856a 100644
--- a/substrate/frame/revive/fixtures/contracts/create_storage_and_call.rs
+++ b/substrate/frame/revive/fixtures/contracts/create_storage_and_call.rs
@@ -40,7 +40,7 @@ pub extern "C" fn call() {
 	api::set_storage(StorageFlags::empty(), buffer, &[1u8; 4]);

 	// Call the callee
-	api::call(
+	let ret = api::call(
 		uapi::CallFlags::empty(),
 		callee,
 		0u64, // How much ref_time weight to devote for the execution. 0 = all.
 		0u64, // How much proof_size weight to devote for the execution. 0 = all.
 		&[0u8; 32], // Value transferred to the contract.
input, None, - ) - .unwrap(); + ); + if let Err(code) = ret { + api::return_value(uapi::ReturnFlags::REVERT, &(code as u32).to_le_bytes()); + }; // create 8 byte of storage after calling // item of 12 bytes because we override 4 bytes diff --git a/substrate/frame/revive/fixtures/contracts/create_storage_and_instantiate.rs b/substrate/frame/revive/fixtures/contracts/create_storage_and_instantiate.rs index 463706457a15..ecc0fc79e6fd 100644 --- a/substrate/frame/revive/fixtures/contracts/create_storage_and_instantiate.rs +++ b/substrate/frame/revive/fixtures/contracts/create_storage_and_instantiate.rs @@ -39,7 +39,7 @@ pub extern "C" fn call() { let salt = [0u8; 32]; let mut address = [0u8; 20]; - api::instantiate( + let ret = api::instantiate( code_hash, 0u64, // How much ref_time weight to devote for the execution. 0 = all. 0u64, // How much proof_size weight to devote for the execution. 0 = all. @@ -49,8 +49,10 @@ pub extern "C" fn call() { Some(&mut address), None, Some(&salt), - ) - .unwrap(); + ); + if let Err(code) = ret { + api::return_value(uapi::ReturnFlags::REVERT, &(code as u32).to_le_bytes()); + }; // Return the deployed contract address. api::return_value(uapi::ReturnFlags::empty(), &address); diff --git a/substrate/frame/revive/fixtures/contracts/delegate_call_deposit_limit.rs b/substrate/frame/revive/fixtures/contracts/delegate_call_deposit_limit.rs index 55203d534c9b..0f157f5a18ac 100644 --- a/substrate/frame/revive/fixtures/contracts/delegate_call_deposit_limit.rs +++ b/substrate/frame/revive/fixtures/contracts/delegate_call_deposit_limit.rs @@ -34,7 +34,11 @@ pub extern "C" fn call() { ); let input = [0u8; 0]; - api::delegate_call(uapi::CallFlags::empty(), address, 0, 0, Some(&u256_bytes(deposit_limit)), &input, None).unwrap(); + let ret = api::delegate_call(uapi::CallFlags::empty(), address, 0, 0, Some(&u256_bytes(deposit_limit)), &input, None); + + if let Err(code) = ret { + api::return_value(uapi::ReturnFlags::REVERT, &(code as u32).to_le_bytes()); + }; let mut key = [0u8; 32]; key[0] = 1u8; diff --git a/substrate/frame/revive/fixtures/contracts/set_code_hash.rs b/substrate/frame/revive/fixtures/contracts/set_code_hash.rs index 75995d7bb8a2..7292c6fd10ae 100644 --- a/substrate/frame/revive/fixtures/contracts/set_code_hash.rs +++ b/substrate/frame/revive/fixtures/contracts/set_code_hash.rs @@ -29,7 +29,7 @@ pub extern "C" fn deploy() {} #[polkavm_derive::polkavm_export] pub extern "C" fn call() { input!(addr: &[u8; 32],); - api::set_code_hash(addr).unwrap(); + api::set_code_hash(addr); // we return 1 after setting new code_hash // next `call` will NOT return this value, because contract code has been changed diff --git a/substrate/frame/revive/fixtures/contracts/unknown_syscall.rs b/substrate/frame/revive/fixtures/contracts/unknown_syscall.rs new file mode 100644 index 000000000000..93ea86754f55 --- /dev/null +++ b/substrate/frame/revive/fixtures/contracts/unknown_syscall.rs @@ -0,0 +1,44 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +#![no_std] +#![no_main] + +extern crate common; + +#[polkavm_derive::polkavm_import] +extern "C" { + pub fn __this_syscall_does_not_exist__(); +} + +// Export that is never called. We can put code here that should be in the binary +// but is never supposed to be run. +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call_never() { + // make sure it is not optimized away + unsafe { + __this_syscall_does_not_exist__(); + } +} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn deploy() {} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call() {} diff --git a/substrate/frame/revive/fixtures/contracts/unstable_interface.rs b/substrate/frame/revive/fixtures/contracts/unstable_interface.rs new file mode 100644 index 000000000000..d73ae041dc06 --- /dev/null +++ b/substrate/frame/revive/fixtures/contracts/unstable_interface.rs @@ -0,0 +1,44 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#![no_std] +#![no_main] + +extern crate common; + +#[polkavm_derive::polkavm_import] +extern "C" { + pub fn set_code_hash(); +} + +// Export that is never called. We can put code here that should be in the binary +// but is never supposed to be run. 
+#[no_mangle]
+#[polkavm_derive::polkavm_export]
+pub extern "C" fn call_never() {
+	// make sure it is not optimized away
+	unsafe {
+		set_code_hash();
+	}
+}
+
+#[no_mangle]
+#[polkavm_derive::polkavm_export]
+pub extern "C" fn deploy() {}
+
+#[no_mangle]
+#[polkavm_derive::polkavm_export]
+pub extern "C" fn call() {}
diff --git a/substrate/frame/revive/fixtures/riscv32emac-unknown-none-polkavm.json b/substrate/frame/revive/fixtures/riscv32emac-unknown-none-polkavm.json
deleted file mode 100644
index bbd54cdefbac..000000000000
--- a/substrate/frame/revive/fixtures/riscv32emac-unknown-none-polkavm.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
-    "arch": "riscv32",
-    "cpu": "generic-rv32",
-    "crt-objects-fallback": "false",
-    "data-layout": "e-m:e-p:32:32-i64:64-n32-S32",
-    "eh-frame-header": false,
-    "emit-debug-gdb-scripts": false,
-    "features": "+e,+m,+a,+c,+lui-addi-fusion,+fast-unaligned-access,+xtheadcondmov",
-    "linker": "rust-lld",
-    "linker-flavor": "ld.lld",
-    "llvm-abiname": "ilp32e",
-    "llvm-target": "riscv32",
-    "max-atomic-width": 32,
-    "panic-strategy": "abort",
-    "relocation-model": "pie",
-    "target-pointer-width": "32",
-    "singlethread": true,
-    "pre-link-args": {
-        "ld": [
-            "--emit-relocs",
-            "--unique",
-            "--relocatable"
-        ]
-    },
-    "env": "polkavm"
-}
diff --git a/substrate/frame/revive/fixtures/src/lib.rs b/substrate/frame/revive/fixtures/src/lib.rs
index cc84daec9b59..24f6ee547dc7 100644
--- a/substrate/frame/revive/fixtures/src/lib.rs
+++ b/substrate/frame/revive/fixtures/src/lib.rs
@@ -19,10 +19,13 @@

 extern crate alloc;

+// generated file that tells us where to find the fixtures
+include!(concat!(env!("OUT_DIR"), "/fixture_location.rs"));
+
 /// Loads a given wasm module and returns the wasm binary contents along with its hash.
 #[cfg(feature = "std")]
 pub fn compile_module(fixture_name: &str) -> anyhow::Result<(Vec<u8>, sp_core::H256)> {
-	let out_dir: std::path::PathBuf = env!("OUT_DIR").into();
+	let out_dir: std::path::PathBuf = FIXTURE_DIR.into();
 	let fixture_path = out_dir.join(format!("{fixture_name}.polkavm"));
 	log::debug!("Loading fixture from {fixture_path:?}");
 	let binary = std::fs::read(fixture_path)?;
@@ -36,12 +39,6 @@
 /// available in no-std environments (runtime benchmarks).
 pub mod bench {
 	use alloc::vec::Vec;
-
-	macro_rules!
fixture { - ($name: literal) => { - include_bytes!(concat!(env!("OUT_DIR"), "/", $name, ".polkavm")) - }; - } pub const DUMMY: &[u8] = fixture!("dummy"); pub const NOOP: &[u8] = fixture!("noop"); pub const INSTR: &[u8] = fixture!("instr_benchmark"); @@ -61,7 +58,7 @@ pub mod bench { mod test { #[test] fn out_dir_should_have_compiled_mocks() { - let out_dir: std::path::PathBuf = env!("OUT_DIR").into(); + let out_dir: std::path::PathBuf = crate::FIXTURE_DIR.into(); assert!(out_dir.join("dummy.polkavm").exists()); } } diff --git a/substrate/frame/revive/mock-network/Cargo.toml b/substrate/frame/revive/mock-network/Cargo.toml index c5b18b3fa290..6208db45a91e 100644 --- a/substrate/frame/revive/mock-network/Cargo.toml +++ b/substrate/frame/revive/mock-network/Cargo.toml @@ -85,6 +85,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", diff --git a/substrate/frame/revive/mock-network/src/tests.rs b/substrate/frame/revive/mock-network/src/tests.rs index bd05726a1a45..34f797c2b530 100644 --- a/substrate/frame/revive/mock-network/src/tests.rs +++ b/substrate/frame/revive/mock-network/src/tests.rs @@ -24,7 +24,7 @@ use frame_support::traits::{fungibles::Mutate, Currency}; use frame_system::RawOrigin; use pallet_revive::{ test_utils::{self, builder::*}, - Code, + Code, DepositLimit, }; use pallet_revive_fixtures::compile_module; use pallet_revive_uapi::ReturnErrorCode; @@ -52,7 +52,7 @@ fn instantiate_test_contract(name: &str) -> Contract { RawOrigin::Signed(ALICE).into(), Code::Upload(wasm), ) - .storage_deposit_limit(1_000_000_000_000) + .storage_deposit_limit(DepositLimit::Balance(1_000_000_000_000)) .build_and_unwrap_contract() }); diff --git a/substrate/frame/revive/proc-macro/src/lib.rs b/substrate/frame/revive/proc-macro/src/lib.rs index 012b4bfab9a9..ed1798e5b689 100644 --- a/substrate/frame/revive/proc-macro/src/lib.rs +++ b/substrate/frame/revive/proc-macro/src/lib.rs @@ -79,6 +79,7 @@ use syn::{parse_quote, punctuated::Punctuated, spanned::Spanned, token::Comma, F /// - `Result<(), TrapReason>`, /// - `Result`, /// - `Result`. +/// - `Result`. /// /// The macro expands to `pub struct Env` declaration, with the following traits implementations: /// - `pallet_revive::wasm::Environment> where E: Ext` @@ -118,7 +119,7 @@ struct EnvDef { /// Parsed host function definition. struct HostFn { item: syn::ItemFn, - api_version: Option, + is_stable: bool, name: String, returns: HostFnReturn, cfg: Option, @@ -127,6 +128,7 @@ struct HostFn { enum HostFnReturn { Unit, U32, + U64, ReturnCode, } @@ -134,8 +136,7 @@ impl HostFnReturn { fn map_output(&self) -> TokenStream2 { match self { Self::Unit => quote! { |_| None }, - Self::U32 => quote! { |ret_val| Some(ret_val) }, - Self::ReturnCode => quote! { |ret_code| Some(ret_code.into()) }, + _ => quote! { |ret_val| Some(ret_val.into()) }, } } @@ -143,6 +144,7 @@ impl HostFnReturn { match self { Self::Unit => syn::ReturnType::Default, Self::U32 => parse_quote! { -> u32 }, + Self::U64 => parse_quote! { -> u64 }, Self::ReturnCode => parse_quote! 
{ -> ReturnErrorCode }, } } @@ -181,22 +183,21 @@ impl HostFn { }; // process attributes - let msg = "Only #[api_version()], #[cfg] and #[mutating] attributes are allowed."; + let msg = "Only #[stable], #[cfg] and #[mutating] attributes are allowed."; let span = item.span(); let mut attrs = item.attrs.clone(); attrs.retain(|a| !a.path().is_ident("doc")); - let mut api_version = None; + let mut is_stable = false; let mut mutating = false; let mut cfg = None; while let Some(attr) = attrs.pop() { let ident = attr.path().get_ident().ok_or(err(span, msg))?.to_string(); match ident.as_str() { - "api_version" => { - if api_version.is_some() { - return Err(err(span, "#[api_version] can only be specified once")) + "stable" => { + if is_stable { + return Err(err(span, "#[stable] can only be specified once")) } - api_version = - Some(attr.parse_args::().and_then(|lit| lit.base10_parse())?); + is_stable = true; }, "mutating" => { if mutating { @@ -243,7 +244,8 @@ impl HostFn { let msg = r#"Should return one of the following: - Result<(), TrapReason>, - Result, - - Result"#; + - Result, + - Result"#; let ret_ty = match item.clone().sig.output { syn::ReturnType::Type(_, ty) => Ok(ty.clone()), _ => Err(err(span, &msg)), @@ -305,11 +307,12 @@ impl HostFn { let returns = match ok_ty_str.as_str() { "()" => Ok(HostFnReturn::Unit), "u32" => Ok(HostFnReturn::U32), + "u64" => Ok(HostFnReturn::U64), "ReturnErrorCode" => Ok(HostFnReturn::ReturnCode), _ => Err(err(arg1.span(), &msg)), }?; - Ok(Self { item, api_version, name, returns, cfg }) + Ok(Self { item, is_stable, name, returns, cfg }) }, _ => Err(err(span, &msg)), } @@ -339,48 +342,61 @@ where P: Iterator> + Clone, I: Iterator> + Clone, { - const ALLOWED_REGISTERS: u32 = 6; - let mut registers_used = 0; - let mut bindings = vec![]; - for (idx, (name, ty)) in param_names.clone().zip(param_types.clone()).enumerate() { + const ALLOWED_REGISTERS: usize = 6; + + // all of them take one register but we truncate them before passing into the function + // it is important to not allow any type which has illegal bit patterns like 'bool' + if !param_types.clone().all(|ty| { let syn::Type::Path(path) = &**ty else { panic!("Type needs to be path"); }; let Some(ident) = path.path.get_ident() else { panic!("Type needs to be ident"); }; - let size = if ident == "i8" || - ident == "i16" || - ident == "i32" || - ident == "u8" || - ident == "u16" || - ident == "u32" - { - 1 - } else if ident == "i64" || ident == "u64" { - 2 - } else { - panic!("Pass by value only supports primitives"); - }; - registers_used += size; - if registers_used > ALLOWED_REGISTERS { - return quote! { - let (#( #param_names, )*): (#( #param_types, )*) = memory.read_as(__a0__)?; - } - } - let this_reg = quote::format_ident!("__a{}__", idx); - let next_reg = quote::format_ident!("__a{}__", idx + 1); - let binding = if size == 1 { + matches!(ident.to_string().as_ref(), "u8" | "u16" | "u32" | "u64") + }) { + panic!("Only primitive unsigned integers are allowed as arguments to syscalls"); + } + + // too many arguments: pass as pointer to a struct in memory + if param_names.clone().count() > ALLOWED_REGISTERS { + let fields = param_names.clone().zip(param_types.clone()).map(|(name, ty)| { quote! { - let #name = #this_reg as #ty; + #name: #ty, } - } else { - quote! { - let #name = (#this_reg as #ty) | ((#next_reg as #ty) << 32); + }); + return quote! 
{
+			#[derive(Default)]
+			#[repr(C)]
+			struct Args {
+				#(#fields)*
 			}
-		};
-		bindings.push(binding);
+			let Args { #(#param_names,)* } = {
+				let len = ::core::mem::size_of::<Args>();
+				let mut args = Args::default();
+				let ptr = &mut args as *mut Args as *mut u8;
+				// Safety
+				// 1. The struct is initialized at all times.
+				// 2. We only allow primitive integers (no bools) as arguments so every bit pattern is safe.
+				// 3. The reference doesn't outlive the args field.
+				// 4. There is only the single reference to the args field.
+				// 5. The length of the generated slice is the same as the struct.
+				let reference = unsafe {
+					::core::slice::from_raw_parts_mut(ptr, len)
+				};
+				memory.read_into_buf(__a0__ as _, reference)?;
+				args
+			};
+		}
 	}
+
+	// otherwise: one argument per register
+	let bindings = param_names.zip(param_types).enumerate().map(|(idx, (name, ty))| {
+		let reg = quote::format_ident!("__a{}__", idx);
+		quote! {
+			let #name = #reg as #ty;
+		}
+	});
 	quote! {
 		#( #bindings )*
 	}
@@ -394,20 +410,24 @@ fn expand_env(def: &EnvDef) -> TokenStream2 {
 	let impls = expand_functions(def);
 	let bench_impls = expand_bench_functions(def);
 	let docs = expand_func_doc(def);
-	let highest_api_version =
-		def.host_funcs.iter().filter_map(|f| f.api_version).max().unwrap_or_default();
+	let stable_syscalls = expand_func_list(def, false);
+	let all_syscalls = expand_func_list(def, true);

 	quote! {
-		#[cfg(test)]
-		pub const HIGHEST_API_VERSION: u16 = #highest_api_version;
+		pub fn list_syscalls(include_unstable: bool) -> &'static [&'static [u8]] {
+			if include_unstable {
+				#all_syscalls
+			} else {
+				#stable_syscalls
+			}
+		}

 		impl<'a, E: Ext, M: PolkaVmInstance<E::T>> Runtime<'a, E, M> {
 			fn handle_ecall(
 				&mut self,
 				memory: &mut M,
 				__syscall_symbol__: &[u8],
-				__available_api_version__: ApiVersion,
-			) -> Result<Option<u32>, TrapReason>
+			) -> Result<Option<u64>, TrapReason>
 			{
 				#impls
 			}
@@ -457,10 +477,6 @@ fn expand_functions(def: &EnvDef) -> TokenStream2 {
 		let body = &f.item.block;
 		let map_output = f.returns.map_output();
 		let output = &f.item.sig.output;
-		let api_version = match f.api_version {
-			Some(version) => quote! { Some(#version) },
-			None => quote! { None },
-		};

 		// wrapped host function body call with host function traces
 		// see https://github.com/paritytech/polkadot-sdk/tree/master/substrate/frame/contracts#host-function-tracing
@@ -496,7 +512,7 @@ fn expand_functions(def: &EnvDef) -> TokenStream2 {

 		quote! {
 			#cfg
-			#syscall_symbol if __is_available__(#api_version) => {
+			#syscall_symbol => {
 				// closure is needed so that "?" can infer the correct type
 				(|| #output {
 					#arg_decoder
@@ -517,18 +533,6 @@ fn expand_functions(def: &EnvDef) -> TokenStream2 {
 		// This is the overhead to call an empty syscall that always needs to be charged.
 		self.charge_gas(crate::wasm::RuntimeCosts::HostFn).map_err(TrapReason::from)?;

-		// Not all APIs are available depending on configuration or when the code was deployed.
-		// This closure will be used by syscall specific code to perform this check.
-		let __is_available__ = |syscall_version: Option<u16>| {
-			match __available_api_version__ {
-				ApiVersion::UnsafeNewest => true,
-				ApiVersion::Versioned(max_available_version) =>
-					syscall_version
-						.map(|required_version| max_available_version >= required_version)
-						.unwrap_or(false),
-			}
-		};
-
 		// They will be mapped to variable names by the syscall specific code.
 		let (__a0__, __a1__, __a2__, __a3__, __a4__, __a5__) = memory.read_input_regs();
@@ -590,10 +594,8 @@ fn expand_func_doc(def: &EnvDef) -> TokenStream2 {
 			});
 			quote!
{ #( #docs )* } }; - let availability = if let Some(version) = func.api_version { - let info = format!( - "\n# Required API version\nThis API was added in version **{version}**.", - ); + let availability = if func.is_stable { + let info = "\n# Stable API\nThis API is stable and will never change."; quote! { #[doc = #info] } } else { let info = @@ -615,3 +617,20 @@ fn expand_func_doc(def: &EnvDef) -> TokenStream2 { #( #docs )* } } + +fn expand_func_list(def: &EnvDef, include_unstable: bool) -> TokenStream2 { + let docs = def.host_funcs.iter().filter(|f| include_unstable || f.is_stable).map(|f| { + let name = Literal::byte_string(f.name.as_bytes()); + quote! { + #name.as_slice() + } + }); + let len = docs.clone().count(); + + quote! { + { + static FUNCS: [&[u8]; #len] = [#(#docs),*]; + FUNCS.as_slice() + } + } +} diff --git a/substrate/frame/revive/rpc/Cargo.toml b/substrate/frame/revive/rpc/Cargo.toml index 9f89b74c668f..674abdd5b73e 100644 --- a/substrate/frame/revive/rpc/Cargo.toml +++ b/substrate/frame/revive/rpc/Cargo.toml @@ -67,15 +67,15 @@ hex = { workspace = true } hex-literal = { workspace = true, optional = true } scale-info = { workspace = true } secp256k1 = { workspace = true, optional = true, features = ["recovery"] } -env_logger = { workspace = true } ethabi = { version = "18.0.0" } [features] example = ["hex-literal", "rlp", "secp256k1", "subxt-signer"] [dev-dependencies] +env_logger = { workspace = true } static_init = { workspace = true } hex-literal = { workspace = true } -pallet-revive-fixtures = { workspace = true } +pallet-revive-fixtures = { workspace = true, default-features = true } substrate-cli-test-utils = { workspace = true } subxt-signer = { workspace = true, features = ["unstable-eth"] } diff --git a/substrate/frame/revive/rpc/examples/js/abi/ErrorTester.json b/substrate/frame/revive/rpc/examples/js/abi/ErrorTester.json new file mode 100644 index 000000000000..2d8dccc771e8 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/abi/ErrorTester.json @@ -0,0 +1,106 @@ +[ + { + "inputs": [ + { + "internalType": "string", + "name": "message", + "type": "string" + } + ], + "name": "CustomError", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "bool", + "name": "newState", + "type": "bool" + } + ], + "name": "setState", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "state", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "triggerAssertError", + "outputs": [], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "triggerCustomError", + "outputs": [], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "triggerDivisionByZero", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "triggerOutOfBoundsError", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "triggerRequireError", + "outputs": [], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "triggerRevertError", + "outputs": [], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + 
], + "name": "valueMatch", + "outputs": [], + "stateMutability": "payable", + "type": "function" + } +] \ No newline at end of file diff --git a/substrate/frame/revive/rpc/examples/js/abi/ErrorTester.ts b/substrate/frame/revive/rpc/examples/js/abi/ErrorTester.ts new file mode 100644 index 000000000000..f3776e498fd5 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/abi/ErrorTester.ts @@ -0,0 +1,106 @@ +export const ErrorTesterAbi = [ + { + inputs: [ + { + internalType: "string", + name: "message", + type: "string", + }, + ], + name: "CustomError", + type: "error", + }, + { + inputs: [ + { + internalType: "bool", + name: "newState", + type: "bool", + }, + ], + name: "setState", + outputs: [], + stateMutability: "nonpayable", + type: "function", + }, + { + inputs: [], + name: "state", + outputs: [ + { + internalType: "bool", + name: "", + type: "bool", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [], + name: "triggerAssertError", + outputs: [], + stateMutability: "pure", + type: "function", + }, + { + inputs: [], + name: "triggerCustomError", + outputs: [], + stateMutability: "pure", + type: "function", + }, + { + inputs: [], + name: "triggerDivisionByZero", + outputs: [ + { + internalType: "uint256", + name: "", + type: "uint256", + }, + ], + stateMutability: "pure", + type: "function", + }, + { + inputs: [], + name: "triggerOutOfBoundsError", + outputs: [ + { + internalType: "uint256", + name: "", + type: "uint256", + }, + ], + stateMutability: "pure", + type: "function", + }, + { + inputs: [], + name: "triggerRequireError", + outputs: [], + stateMutability: "pure", + type: "function", + }, + { + inputs: [], + name: "triggerRevertError", + outputs: [], + stateMutability: "pure", + type: "function", + }, + { + inputs: [ + { + internalType: "uint256", + name: "value", + type: "uint256", + }, + ], + name: "valueMatch", + outputs: [], + stateMutability: "payable", + type: "function", + }, +] as const; diff --git a/substrate/frame/revive/rpc/examples/js/abi/EventExample.json b/substrate/frame/revive/rpc/examples/js/abi/EventExample.json new file mode 100644 index 000000000000..a64c920c4068 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/abi/EventExample.json @@ -0,0 +1,34 @@ +[ + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "sender", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "value", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "string", + "name": "message", + "type": "string" + } + ], + "name": "ExampleEvent", + "type": "event" + }, + { + "inputs": [], + "name": "triggerEvent", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + } +] \ No newline at end of file diff --git a/substrate/frame/revive/rpc/examples/js/abi/EventExample.ts b/substrate/frame/revive/rpc/examples/js/abi/EventExample.ts new file mode 100644 index 000000000000..efb0d741b48f --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/abi/EventExample.ts @@ -0,0 +1,34 @@ +export const EventExampleAbi = [ + { + anonymous: false, + inputs: [ + { + indexed: true, + internalType: "address", + name: "sender", + type: "address", + }, + { + indexed: false, + internalType: "uint256", + name: "value", + type: "uint256", + }, + { + indexed: false, + internalType: "string", + name: "message", + type: "string", + }, + ], + name: "ExampleEvent", + type: "event", + }, + { + inputs: [], + name: "triggerEvent", + outputs: [], + stateMutability: 
"nonpayable", + type: "function", + }, +] as const; diff --git a/substrate/frame/revive/rpc/examples/js/abi/Flipper.json b/substrate/frame/revive/rpc/examples/js/abi/Flipper.json new file mode 100644 index 000000000000..4c1b163d2943 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/abi/Flipper.json @@ -0,0 +1,35 @@ +[ + { + "inputs": [], + "name": "flip", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "getValue", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "value", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + } +] \ No newline at end of file diff --git a/substrate/frame/revive/rpc/examples/js/abi/Flipper.ts b/substrate/frame/revive/rpc/examples/js/abi/Flipper.ts new file mode 100644 index 000000000000..d7428beb6aa9 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/abi/Flipper.ts @@ -0,0 +1,35 @@ +export const FlipperAbi = [ + { + inputs: [], + name: "flip", + outputs: [], + stateMutability: "nonpayable", + type: "function", + }, + { + inputs: [], + name: "getValue", + outputs: [ + { + internalType: "bool", + name: "", + type: "bool", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [], + name: "value", + outputs: [ + { + internalType: "bool", + name: "", + type: "bool", + }, + ], + stateMutability: "view", + type: "function", + }, +] as const; diff --git a/substrate/frame/revive/rpc/examples/js/abi/FlipperCaller.json b/substrate/frame/revive/rpc/examples/js/abi/FlipperCaller.json new file mode 100644 index 000000000000..c4ed4228f47d --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/abi/FlipperCaller.json @@ -0,0 +1,46 @@ +[ + { + "inputs": [ + { + "internalType": "address", + "name": "_flipperAddress", + "type": "address" + } + ], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "inputs": [], + "name": "callFlip", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "callGetValue", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "flipperAddress", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + } +] \ No newline at end of file diff --git a/substrate/frame/revive/rpc/examples/js/abi/FlipperCaller.ts b/substrate/frame/revive/rpc/examples/js/abi/FlipperCaller.ts new file mode 100644 index 000000000000..2d695886d960 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/abi/FlipperCaller.ts @@ -0,0 +1,46 @@ +export const FlipperCallerAbi = [ + { + inputs: [ + { + internalType: "address", + name: "_flipperAddress", + type: "address", + }, + ], + stateMutability: "nonpayable", + type: "constructor", + }, + { + inputs: [], + name: "callFlip", + outputs: [], + stateMutability: "nonpayable", + type: "function", + }, + { + inputs: [], + name: "callGetValue", + outputs: [ + { + internalType: "bool", + name: "", + type: "bool", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [], + name: "flipperAddress", + outputs: [ + { + internalType: "address", + name: "", + type: "address", + }, + ], + stateMutability: "view", + type: "function", + }, +] as const; diff 
--git a/substrate/frame/revive/rpc/examples/js/abi/PiggyBank.json b/substrate/frame/revive/rpc/examples/js/abi/PiggyBank.json new file mode 100644 index 000000000000..e6655889e21a --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/abi/PiggyBank.json @@ -0,0 +1,65 @@ +[ + { + "inputs": [], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "inputs": [], + "name": "deposit", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [], + "name": "getDeposit", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "owner", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "withdrawAmount", + "type": "uint256" + } + ], + "name": "withdraw", + "outputs": [ + { + "internalType": "uint256", + "name": "remainingBal", + "type": "uint256" + } + ], + "stateMutability": "nonpayable", + "type": "function" + } +] \ No newline at end of file diff --git a/substrate/frame/revive/rpc/examples/js/abi/RevertExample.ts b/substrate/frame/revive/rpc/examples/js/abi/RevertExample.ts new file mode 100644 index 000000000000..ab483b1811c4 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/abi/RevertExample.ts @@ -0,0 +1,14 @@ +export const RevertExampleAbi = [ + { + inputs: [], + stateMutability: "nonpayable", + type: "constructor", + }, + { + inputs: [], + name: "doRevert", + outputs: [], + stateMutability: "nonpayable", + type: "function", + }, +] as const; diff --git a/substrate/frame/revive/rpc/examples/js/abi/errorTester.ts b/substrate/frame/revive/rpc/examples/js/abi/errorTester.ts new file mode 100644 index 000000000000..f3776e498fd5 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/abi/errorTester.ts @@ -0,0 +1,106 @@ +export const ErrorTesterAbi = [ + { + inputs: [ + { + internalType: "string", + name: "message", + type: "string", + }, + ], + name: "CustomError", + type: "error", + }, + { + inputs: [ + { + internalType: "bool", + name: "newState", + type: "bool", + }, + ], + name: "setState", + outputs: [], + stateMutability: "nonpayable", + type: "function", + }, + { + inputs: [], + name: "state", + outputs: [ + { + internalType: "bool", + name: "", + type: "bool", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [], + name: "triggerAssertError", + outputs: [], + stateMutability: "pure", + type: "function", + }, + { + inputs: [], + name: "triggerCustomError", + outputs: [], + stateMutability: "pure", + type: "function", + }, + { + inputs: [], + name: "triggerDivisionByZero", + outputs: [ + { + internalType: "uint256", + name: "", + type: "uint256", + }, + ], + stateMutability: "pure", + type: "function", + }, + { + inputs: [], + name: "triggerOutOfBoundsError", + outputs: [ + { + internalType: "uint256", + name: "", + type: "uint256", + }, + ], + stateMutability: "pure", + type: "function", + }, + { + inputs: [], + name: "triggerRequireError", + outputs: [], + stateMutability: "pure", + type: "function", + }, + { + inputs: [], + name: "triggerRevertError", + outputs: [], + stateMutability: "pure", + type: "function", + }, + { + inputs: [ + { + internalType: "uint256", + name: "value", + type: "uint256", + }, + ], + name: "valueMatch", + outputs: [], + 
stateMutability: "payable", + type: "function", + }, +] as const; diff --git a/substrate/frame/revive/rpc/examples/js/abi/event.json b/substrate/frame/revive/rpc/examples/js/abi/event.json deleted file mode 100644 index d36089fbc84e..000000000000 --- a/substrate/frame/revive/rpc/examples/js/abi/event.json +++ /dev/null @@ -1,34 +0,0 @@ -[ - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "sender", - "type": "address" - }, - { - "indexed": false, - "internalType": "uint256", - "name": "value", - "type": "uint256" - }, - { - "indexed": false, - "internalType": "string", - "name": "message", - "type": "string" - } - ], - "name": "ExampleEvent", - "type": "event" - }, - { - "inputs": [], - "name": "triggerEvent", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - } -] diff --git a/substrate/frame/revive/rpc/examples/js/abi/piggyBank.json b/substrate/frame/revive/rpc/examples/js/abi/piggyBank.json deleted file mode 100644 index 2c2cfd5f7533..000000000000 --- a/substrate/frame/revive/rpc/examples/js/abi/piggyBank.json +++ /dev/null @@ -1,65 +0,0 @@ -[ - { - "inputs": [], - "stateMutability": "nonpayable", - "type": "constructor" - }, - { - "inputs": [], - "name": "deposit", - "outputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" - } - ], - "stateMutability": "payable", - "type": "function" - }, - { - "inputs": [], - "name": "getDeposit", - "outputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "owner", - "outputs": [ - { - "internalType": "address", - "name": "", - "type": "address" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint256", - "name": "withdrawAmount", - "type": "uint256" - } - ], - "name": "withdraw", - "outputs": [ - { - "internalType": "uint256", - "name": "remainingBal", - "type": "uint256" - } - ], - "stateMutability": "nonpayable", - "type": "function" - } -] diff --git a/substrate/frame/revive/rpc/examples/js/abi/piggyBank.ts b/substrate/frame/revive/rpc/examples/js/abi/piggyBank.ts new file mode 100644 index 000000000000..a6b8c1b0be56 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/abi/piggyBank.ts @@ -0,0 +1,65 @@ +export const PiggyBankAbi = [ + { + inputs: [], + stateMutability: "nonpayable", + type: "constructor", + }, + { + inputs: [], + name: "deposit", + outputs: [ + { + internalType: "uint256", + name: "", + type: "uint256", + }, + ], + stateMutability: "payable", + type: "function", + }, + { + inputs: [], + name: "getDeposit", + outputs: [ + { + internalType: "uint256", + name: "", + type: "uint256", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [], + name: "owner", + outputs: [ + { + internalType: "address", + name: "", + type: "address", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ + { + internalType: "uint256", + name: "withdrawAmount", + type: "uint256", + }, + ], + name: "withdraw", + outputs: [ + { + internalType: "uint256", + name: "remainingBal", + type: "uint256", + }, + ], + stateMutability: "nonpayable", + type: "function", + }, +] as const; diff --git a/substrate/frame/revive/rpc/examples/js/abi/revert.json b/substrate/frame/revive/rpc/examples/js/abi/revert.json deleted file mode 100644 index be2945fcc0a5..000000000000 --- a/substrate/frame/revive/rpc/examples/js/abi/revert.json +++ /dev/null @@ -1,14 +0,0 
@@ -[ - { - "inputs": [], - "stateMutability": "nonpayable", - "type": "constructor" - }, - { - "inputs": [], - "name": "doRevert", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - } -] diff --git a/substrate/frame/revive/rpc/examples/js/bun.lockb b/substrate/frame/revive/rpc/examples/js/bun.lockb index 700dca51da2a..46994bb14754 100755 Binary files a/substrate/frame/revive/rpc/examples/js/bun.lockb and b/substrate/frame/revive/rpc/examples/js/bun.lockb differ diff --git a/substrate/frame/revive/rpc/examples/js/contracts/.solhint.json b/substrate/frame/revive/rpc/examples/js/contracts/.solhint.json new file mode 100644 index 000000000000..ce2220e0b756 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/contracts/.solhint.json @@ -0,0 +1,3 @@ +{ + "extends": "solhint:recommended" +} diff --git a/substrate/frame/revive/rpc/examples/js/contracts/ErrorTester.sol b/substrate/frame/revive/rpc/examples/js/contracts/ErrorTester.sol new file mode 100644 index 000000000000..f1fdd219624a --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/contracts/ErrorTester.sol @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +contract ErrorTester { + bool public state; + + // Payable function that can be used to test insufficient funds errors + function valueMatch(uint256 value) public payable { + require(msg.value == value , "msg.value does not match value"); + } + + function setState(bool newState) public { + state = newState; + } + + // Trigger a require statement failure with a custom error message + function triggerRequireError() public pure { + require(false, "This is a require error"); + } + + // Trigger an assert statement failure + function triggerAssertError() public pure { + assert(false); + } + + // Trigger a revert statement with a custom error message + function triggerRevertError() public pure { + revert("This is a revert error"); + } + + // Trigger a division by zero error + function triggerDivisionByZero() public pure returns (uint256) { + uint256 a = 1; + uint256 b = 0; + return a / b; + } + + // Trigger an out-of-bounds array access + function triggerOutOfBoundsError() public pure returns (uint256) { + uint256[] memory arr = new uint256[](1); + return arr[2]; + } + + // Trigger a custom error + error CustomError(string message); + + function triggerCustomError() public pure { + revert CustomError("This is a custom error"); + } +} + diff --git a/substrate/frame/revive/rpc/examples/js/contracts/Flipper.sol b/substrate/frame/revive/rpc/examples/js/contracts/Flipper.sol new file mode 100644 index 000000000000..51aaafcae428 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/contracts/Flipper.sol @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +// Flipper - Stores and toggles a boolean value +contract Flipper { + bool public value; + + function flip() external { + value = !value; + } + + function getValue() external view returns (bool) { + return value; + } +} + +// FlipperCaller - Interacts with the Flipper contract +contract FlipperCaller { + // Address of the Flipper contract + address public flipperAddress; + + // Constructor to initialize Flipper's address + constructor(address _flipperAddress) { + flipperAddress = _flipperAddress; + } + + function callFlip() external { + Flipper(flipperAddress).flip(); + } + + function callGetValue() external view returns (bool) { + return Flipper(flipperAddress).getValue(); + } +} + diff --git a/substrate/frame/revive/rpc/examples/js/contracts/PiggyBank.sol 
b/substrate/frame/revive/rpc/examples/js/contracts/PiggyBank.sol index 1906c4658889..0c8a4d26f4dc 100644 --- a/substrate/frame/revive/rpc/examples/js/contracts/PiggyBank.sol +++ b/substrate/frame/revive/rpc/examples/js/contracts/PiggyBank.sol @@ -3,7 +3,7 @@ pragma solidity ^0.8.0; contract PiggyBank { - uint private balance; + uint256 private balance; address public owner; constructor() { @@ -11,16 +11,16 @@ contract PiggyBank { balance = 0; } - function deposit() public payable returns (uint) { + function deposit() public payable returns (uint256) { balance += msg.value; return balance; } - function getDeposit() public view returns (uint) { + function getDeposit() public view returns (uint256) { return balance; } - function withdraw(uint withdrawAmount) public returns (uint remainingBal) { + function withdraw(uint256 withdrawAmount) public returns (uint256 remainingBal) { require(msg.sender == owner); balance -= withdrawAmount; (bool success, ) = payable(msg.sender).call{value: withdrawAmount}(""); diff --git a/substrate/frame/revive/rpc/examples/js/contracts/Revert.sol b/substrate/frame/revive/rpc/examples/js/contracts/Revert.sol deleted file mode 100644 index 53f1f8994256..000000000000 --- a/substrate/frame/revive/rpc/examples/js/contracts/Revert.sol +++ /dev/null @@ -1,11 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -contract RevertExample { - constructor() { - } - - function doRevert() public { - revert("revert message"); - } -} diff --git a/substrate/frame/revive/rpc/examples/js/evm/.gitkeep b/substrate/frame/revive/rpc/examples/js/evm/.gitkeep new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/substrate/frame/revive/rpc/examples/js/package.json b/substrate/frame/revive/rpc/examples/js/package.json index 3ae1f0fbd799..6d8d00fd4214 100644 --- a/substrate/frame/revive/rpc/examples/js/package.json +++ b/substrate/frame/revive/rpc/examples/js/package.json @@ -1,22 +1,23 @@ { - "name": "demo", - "private": true, - "version": "0.0.0", - "type": "module", - "scripts": { - "dev": "vite", - "build": "tsc && vite build", - "preview": "vite preview", - "generate-types": "typechain --target=ethers-v6 'abi/*.json'" - }, - "dependencies": { - "@typechain/ethers-v6": "^0.5.1", - "ethers": "^6.13.4", - "solc": "^0.8.28", - "typechain": "^8.3.2" - }, - "devDependencies": { - "typescript": "^5.5.3", - "vite": "^5.4.8" - } + "name": "demo", + "private": true, + "version": "0.0.0", + "type": "module", + "scripts": { + "dev": "vite", + "build": "tsc && vite build", + "preview": "vite preview" + }, + "dependencies": { + "ethers": "^6.13.4", + "solc": "^0.8.28", + "viem": "^2.21.47", + "@parity/revive": "^0.0.5" + }, + "devDependencies": { + "prettier": "^3.3.3", + "@types/bun": "^1.1.13", + "typescript": "^5.5.3", + "vite": "^5.4.8" + } } diff --git a/substrate/frame/revive/rpc/examples/js/pvm/ErrorTester.polkavm b/substrate/frame/revive/rpc/examples/js/pvm/ErrorTester.polkavm new file mode 100644 index 000000000000..ffdbbe2f9c4b Binary files /dev/null and b/substrate/frame/revive/rpc/examples/js/pvm/ErrorTester.polkavm differ diff --git a/substrate/frame/revive/rpc/examples/js/pvm/EventExample.polkavm b/substrate/frame/revive/rpc/examples/js/pvm/EventExample.polkavm new file mode 100644 index 000000000000..4b013b35852e Binary files /dev/null and b/substrate/frame/revive/rpc/examples/js/pvm/EventExample.polkavm differ diff --git a/substrate/frame/revive/rpc/examples/js/pvm/Flipper.polkavm b/substrate/frame/revive/rpc/examples/js/pvm/Flipper.polkavm new file mode 
100644 index 000000000000..7fb299e464dd Binary files /dev/null and b/substrate/frame/revive/rpc/examples/js/pvm/Flipper.polkavm differ diff --git a/substrate/frame/revive/rpc/examples/js/pvm/FlipperCaller.polkavm b/substrate/frame/revive/rpc/examples/js/pvm/FlipperCaller.polkavm new file mode 100644 index 000000000000..f112ca275df4 Binary files /dev/null and b/substrate/frame/revive/rpc/examples/js/pvm/FlipperCaller.polkavm differ diff --git a/substrate/frame/revive/rpc/examples/js/pvm/PiggyBank.polkavm b/substrate/frame/revive/rpc/examples/js/pvm/PiggyBank.polkavm new file mode 100644 index 000000000000..4423c605f746 Binary files /dev/null and b/substrate/frame/revive/rpc/examples/js/pvm/PiggyBank.polkavm differ diff --git a/substrate/frame/revive/rpc/examples/js/pvm/event.polkavm b/substrate/frame/revive/rpc/examples/js/pvm/event.polkavm deleted file mode 100644 index 859c1cdf5d3e..000000000000 Binary files a/substrate/frame/revive/rpc/examples/js/pvm/event.polkavm and /dev/null differ diff --git a/substrate/frame/revive/rpc/examples/js/pvm/piggyBank.polkavm b/substrate/frame/revive/rpc/examples/js/pvm/piggyBank.polkavm deleted file mode 100644 index 1a45c15d53f6..000000000000 Binary files a/substrate/frame/revive/rpc/examples/js/pvm/piggyBank.polkavm and /dev/null differ diff --git a/substrate/frame/revive/rpc/examples/js/pvm/revert.polkavm b/substrate/frame/revive/rpc/examples/js/pvm/revert.polkavm deleted file mode 100644 index 7505c402f21b..000000000000 Binary files a/substrate/frame/revive/rpc/examples/js/pvm/revert.polkavm and /dev/null differ diff --git a/substrate/frame/revive/rpc/examples/js/src/balance.ts b/substrate/frame/revive/rpc/examples/js/src/balance.ts new file mode 100644 index 000000000000..1261dcab7812 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/src/balance.ts @@ -0,0 +1,8 @@ +import { walletClient } from './lib.ts' + +const recipient = '0x8D97689C9818892B700e27F316cc3E41e17fBeb9' +try { + console.log(`Recipient balance: ${await walletClient.getBalance({ address: recipient })}`) +} catch (err) { + console.error(err) +} diff --git a/substrate/frame/revive/rpc/examples/js/src/build-contracts.ts b/substrate/frame/revive/rpc/examples/js/src/build-contracts.ts index c6b7700d1ccf..a37b850214b8 100644 --- a/substrate/frame/revive/rpc/examples/js/src/build-contracts.ts +++ b/substrate/frame/revive/rpc/examples/js/src/build-contracts.ts @@ -1,11 +1,23 @@ import { compile } from '@parity/revive' +import { format } from 'prettier' +import { parseArgs } from 'node:util' import solc from 'solc' -import { readFileSync, writeFileSync } from 'fs' -import { join } from 'path' +import { readdirSync, readFileSync, writeFileSync } from 'fs' +import { basename, join } from 'path' type CompileInput = Parameters<typeof compile>[0] -type CompileOutput = Awaited<ReturnType<typeof compile>> -type Abi = CompileOutput['contracts'][string][string]['abi'] + +const { + values: { filter }, +} = parseArgs({ + args: process.argv.slice(2), + options: { + filter: { + type: 'string', + short: 'f', + }, + }, +}) function evmCompile(sources: CompileInput) { const input = { @@ -25,32 +37,60 @@ function evmCompile(sources: CompileInput) { console.log('Compiling contracts...') -const input = [ - { file: 'Event.sol', contract: 'EventExample', keypath: 'event' }, - { file: 'Revert.sol', contract: 'RevertExample', keypath: 'revert' }, - { file: 'PiggyBank.sol', contract: 'PiggyBank', keypath: 'piggyBank' }, -] +const rootDir = join(__dirname, '..') +const contractsDir = join(rootDir, 'contracts') +const abiDir = join(rootDir, 'abi') 
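// Editor's aside (illustrative, not part of this PR): for each contract compiled below, the
// script emits both abi/<Name>.json and an abi/<Name>.ts module that exports the ABI
// `as const`. The const-asserted export is what lets viem infer function names, argument
// types, and return types at compile time, e.g. (names borrowed from these examples):
//
//   import { FlipperAbi } from '../abi/Flipper'
//   // functionName is type-checked against the ABI; value is inferred as boolean
//   const value = await walletClient.readContract({
//       address: contractAddress,
//       abi: FlipperAbi,
//       functionName: 'getValue',
//   })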
+const pvmDir = join(rootDir, 'pvm') +const evmDir = join(rootDir, 'evm') -for (const { keypath, contract, file } of input) { +const input = readdirSync(contractsDir) + .filter((f) => f.endsWith('.sol')) + .filter((f) => !filter || f.includes(filter)) + +for (const file of input) { + console.log(`🔨 Compiling ${file}...`) + const name = basename(file, '.sol') const input = { - [file]: { content: readFileSync(join('contracts', file), 'utf8') }, + [name]: { content: readFileSync(join(contractsDir, file), 'utf8') }, } - { - console.log(`Compile with solc ${file}`) - const out = JSON.parse(evmCompile(input)) - const entry = out.contracts[file][contract] - writeFileSync(join('evm', `${keypath}.bin`), Buffer.from(entry.evm.bytecode.object, 'hex')) - writeFileSync(join('abi', `${keypath}.json`), JSON.stringify(entry.abi, null, 2)) + console.log('Compiling with revive...') + const reviveOut = await compile(input) + + for (const contracts of Object.values(reviveOut.contracts)) { + for (const [name, contract] of Object.entries(contracts)) { + console.log(`📜 Add PVM contract ${name}`) + const abi = contract.abi + const abiName = `${name}Abi` + writeFileSync( + join(abiDir, `${name}.json`), + JSON.stringify(abi, null, 2) + ) + + writeFileSync( + join(abiDir, `${name}.ts`), + await format(`export const ${abiName} = ${JSON.stringify(abi, null, 2)} as const`, { + parser: 'typescript', + }) + ) + + writeFileSync( + join(pvmDir, `${name}.polkavm`), + Buffer.from(contract.evm.bytecode.object, 'hex') + ) + } } - { - console.log(`Compile with revive ${file}`) - const out = await compile(input) - const entry = out.contracts[file][contract] - writeFileSync( - join('pvm', `${keypath}.polkavm`), - Buffer.from(entry.evm.bytecode.object, 'hex') - ) + console.log(`Compile with solc ${file}`) + const evmOut = JSON.parse(evmCompile(input)) as typeof reviveOut + + for (const contracts of Object.values(evmOut.contracts)) { + for (const [name, contract] of Object.entries(contracts)) { + console.log(`📜 Add EVM contract ${name}`) + writeFileSync( + join(evmDir, `${name}.bin`), + Buffer.from(contract.evm.bytecode.object, 'hex') + ) + } } } diff --git a/substrate/frame/revive/rpc/examples/js/src/event.ts b/substrate/frame/revive/rpc/examples/js/src/event.ts index 94cc2560272e..2e672a9772ff 100644 --- a/substrate/frame/revive/rpc/examples/js/src/event.ts +++ b/substrate/frame/revive/rpc/examples/js/src/event.ts @@ -1,15 +1,29 @@ //! 
Run with bun run script-event.ts -import { call, getContract, deploy } from './lib.ts' - -try { - const { abi, bytecode } = getContract('event') - const contract = await deploy(bytecode, abi) - const receipt = await call('triggerEvent', await contract.getAddress(), abi) - if (receipt) { - for (const log of receipt.logs) { - console.log('Event log:', JSON.stringify(log, null, 2)) - } - } -} catch (err) { - console.error(err) + +import { abi } from '../abi/event.ts' +import { assert, getByteCode, walletClient } from './lib.ts' + +const deployHash = await walletClient.deployContract({ + abi, + bytecode: getByteCode('event'), +}) +const deployReceipt = await walletClient.waitForTransactionReceipt({ hash: deployHash }) +const contractAddress = deployReceipt.contractAddress +console.log('Contract deployed:', contractAddress) +assert(contractAddress, 'Contract address should be set') + +const { request } = await walletClient.simulateContract({ + account: walletClient.account, + address: contractAddress, + abi, + functionName: 'triggerEvent', +}) + +const hash = await walletClient.writeContract(request) +const receipt = await walletClient.waitForTransactionReceipt({ hash }) +console.log(`Receipt: ${receipt.status}`) +console.log(`Logs receipt: ${receipt.status}`) + +for (const log of receipt.logs) { + console.log('Event log:', log) } diff --git a/substrate/frame/revive/rpc/examples/js/src/geth-diff-setup.ts b/substrate/frame/revive/rpc/examples/js/src/geth-diff-setup.ts new file mode 100644 index 000000000000..3db2453f2475 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/src/geth-diff-setup.ts @@ -0,0 +1,177 @@ +import { spawn, spawnSync, Subprocess } from 'bun' +import { resolve } from 'path' +import { readFileSync } from 'fs' +import { createWalletClient, defineChain, Hex, http, publicActions } from 'viem' +import { privateKeyToAccount } from 'viem/accounts' + +export function getByteCode(name: string, evm: boolean): Hex { + const bytecode = evm ? readFileSync(`evm/${name}.bin`) : readFileSync(`pvm/${name}.polkavm`) + return `0x${Buffer.from(bytecode).toString('hex')}` +} + +export type JsonRpcError = { + code: number + message: string + data: Hex +} + +export function killProcessOnPort(port: number) { + // Check which process is using the specified port + const result = spawnSync(['lsof', '-ti', `:${port}`]) + const output = result.stdout.toString().trim() + + if (output) { + console.log(`Port ${port} is in use. Killing process...`) + const pids = output.split('\n') + + // Kill each process using the port + for (const pid of pids) { + spawnSync(['kill', '-9', pid]) + console.log(`Killed process with PID: ${pid}`) + } + } +} + +export let jsonRpcErrors: JsonRpcError[] = [] +export async function createEnv(name: 'geth' | 'kitchensink') { + const gethPort = process.env.GETH_PORT || '8546' + const kitchensinkPort = process.env.KITCHENSINK_PORT || '8545' + const url = `http://localhost:${name == 'geth' ? gethPort : kitchensinkPort}` + const chain = defineChain({ + id: name == 'geth' ? 
1337 : 420420420, + name, + nativeCurrency: { + name: 'Westie', + symbol: 'WST', + decimals: 18, + }, + rpcUrls: { + default: { + http: [url], + }, + }, + testnet: true, + }) + + const transport = http(url, { + onFetchResponse: async (response) => { + const raw = await response.clone().json() + if (raw.error) { + jsonRpcErrors.push(raw.error as JsonRpcError) + } + }, + }) + + const wallet = createWalletClient({ + transport, + chain, + }) + + const [account] = await wallet.getAddresses() + const serverWallet = createWalletClient({ + account, + transport, + chain, + }).extend(publicActions) + + const accountWallet = createWalletClient({ + account: privateKeyToAccount( + '0xa872f6cbd25a0e04a08b1e21098017a9e6194d101d75e13111f71410c59cd57f' + ), + transport, + chain, + }).extend(publicActions) + + return { serverWallet, accountWallet, evm: name == 'geth' } +} + +// wait for http request to return 200 +export function waitForHealth(url: string) { + return new Promise((resolve, reject) => { + const start = Date.now() + const interval = setInterval(async () => { + try { + const res = await fetch(url, { + method: 'POST', + headers: { + 'content-type': 'application/json', + }, + body: JSON.stringify({ + jsonrpc: '2.0', + method: 'eth_syncing', + params: [], + id: 1, + }), + }) + + if (res.status !== 200) { + return + } + + clearInterval(interval) + resolve() + } catch (_err) { + const elapsed = Date.now() - start + if (elapsed > 30_000) { + clearInterval(interval) + reject(new Error('hit timeout')) + } + } + }, 1000) + }) +} + +export const procs: Subprocess[] = [] +const polkadotSdkPath = resolve(__dirname, '../../../../../../..') +if (!process.env.USE_LIVE_SERVERS) { + procs.push( + // Run geth on port 8546 + await (async () => { + killProcessOnPort(8546) + const proc = spawn( + 'geth --http --http.api web3,eth,debug,personal,net --http.port 8546 --dev --verbosity 0'.split( + ' ' + ), + { stdout: Bun.file('/tmp/geth.out.log'), stderr: Bun.file('/tmp/geth.err.log') } + ) + + await waitForHealth('http://localhost:8546').catch() + return proc + })(), + //Run the substate node + (() => { + killProcessOnPort(9944) + return spawn( + [ + './target/debug/substrate-node', + '--dev', + '-l=error,evm=debug,sc_rpc_server=info,runtime::revive=debug', + ], + { + stdout: Bun.file('/tmp/kitchensink.out.log'), + stderr: Bun.file('/tmp/kitchensink.err.log'), + cwd: polkadotSdkPath, + } + ) + })(), + // Run eth-rpc on 8545 + await (async () => { + killProcessOnPort(8545) + const proc = spawn( + [ + './target/debug/eth-rpc', + '--dev', + '--node-rpc-url=ws://localhost:9944', + '-l=rpc-metrics=debug,eth-rpc=debug', + ], + { + stdout: Bun.file('/tmp/eth-rpc.out.log'), + stderr: Bun.file('/tmp/eth-rpc.err.log'), + cwd: polkadotSdkPath, + } + ) + await waitForHealth('http://localhost:8545').catch() + return proc + })() + ) +} diff --git a/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts b/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts new file mode 100644 index 000000000000..37ebbc9ea3b3 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts @@ -0,0 +1,315 @@ +import { jsonRpcErrors, procs, createEnv, getByteCode } from './geth-diff-setup.ts' +import { afterAll, afterEach, beforeAll, describe, expect, test } from 'bun:test' +import { encodeFunctionData, Hex, parseEther } from 'viem' +import { ErrorTesterAbi } from '../abi/ErrorTester' +import { FlipperCallerAbi } from '../abi/FlipperCaller' +import { FlipperAbi } from '../abi/Flipper' + +afterEach(() => { + 
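// Editor's note (not part of this PR): the http transport hook installed in
// geth-diff-setup.ts pushes every JSON-RPC error object it observes into jsonRpcErrors;
// clearing the array between tests keeps each test's assertions scoped to the errors
// that the test itself triggered.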
jsonRpcErrors.length = 0 +}) + +afterAll(async () => { + procs.forEach((proc) => proc.kill()) +}) + +const envs = await Promise.all([createEnv('geth'), createEnv('kitchensink')]) + +for (const env of envs) { + describe(env.serverWallet.chain.name, () => { + let errorTesterAddr: Hex = '0x' + let flipperAddr: Hex = '0x' + let flipperCallerAddr: Hex = '0x' + beforeAll(async () => { + { + const hash = await env.serverWallet.deployContract({ + abi: ErrorTesterAbi, + bytecode: getByteCode('errorTester', env.evm), + }) + const deployReceipt = await env.serverWallet.waitForTransactionReceipt({ hash }) + if (!deployReceipt.contractAddress) + throw new Error('Contract address should be set') + errorTesterAddr = deployReceipt.contractAddress + } + + { + const hash = await env.serverWallet.deployContract({ + abi: FlipperAbi, + bytecode: getByteCode('flipper', env.evm), + }) + const deployReceipt = await env.serverWallet.waitForTransactionReceipt({ hash }) + if (!deployReceipt.contractAddress) + throw new Error('Contract address should be set') + flipperAddr = deployReceipt.contractAddress + } + + { + const hash = await env.serverWallet.deployContract({ + abi: FlipperCallerAbi, + args: [flipperAddr], + bytecode: getByteCode('flipperCaller', env.evm), + }) + const deployReceipt = await env.serverWallet.waitForTransactionReceipt({ hash }) + if (!deployReceipt.contractAddress) + throw new Error('Contract address should be set') + flipperCallerAddr = deployReceipt.contractAddress + } + }) + + test('triggerAssertError', async () => { + expect.assertions(3) + try { + await env.accountWallet.readContract({ + address: errorTesterAddr, + abi: ErrorTesterAbi, + functionName: 'triggerAssertError', + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(3) + expect(lastJsonRpcError?.data).toBe( + '0x4e487b710000000000000000000000000000000000000000000000000000000000000001' + ) + expect(lastJsonRpcError?.message).toBe('execution reverted: assert(false)') + } + }) + + test('triggerRevertError', async () => { + expect.assertions(3) + try { + await env.accountWallet.readContract({ + address: errorTesterAddr, + abi: ErrorTesterAbi, + functionName: 'triggerRevertError', + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(3) + expect(lastJsonRpcError?.message).toBe('execution reverted: This is a revert error') + expect(lastJsonRpcError?.data).toBe( + '0x08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001654686973206973206120726576657274206572726f7200000000000000000000' + ) + } + }) + + test('triggerDivisionByZero', async () => { + expect.assertions(3) + try { + await env.accountWallet.readContract({ + address: errorTesterAddr, + abi: ErrorTesterAbi, + functionName: 'triggerDivisionByZero', + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(3) + expect(lastJsonRpcError?.data).toBe( + '0x4e487b710000000000000000000000000000000000000000000000000000000000000012' + ) + expect(lastJsonRpcError?.message).toBe( + 'execution reverted: division or modulo by zero' + ) + } + }) + + test('triggerOutOfBoundsError', async () => { + expect.assertions(3) + try { + await env.accountWallet.readContract({ + address: errorTesterAddr, + abi: ErrorTesterAbi, + functionName: 'triggerOutOfBoundsError', + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + 
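// Editor's aside (illustrative, not part of this PR): the data payload asserted below starts
// with 0x4e487b71, the selector of Solidity's built-in Panic(uint256) error; the trailing
// 0x32 is the panic code for out-of-bounds array access. Assuming a captured error's data
// field, viem can decode such payloads without the panic ABI being declared:
//
//   import { decodeErrorResult } from 'viem'
//   const decoded = decodeErrorResult({ abi: ErrorTesterAbi, data: lastJsonRpcError!.data })
//   console.log(decoded.errorName, decoded.args) // -> "Panic" [ 50n ]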
expect(lastJsonRpcError?.code).toBe(3) + expect(lastJsonRpcError?.data).toBe( + '0x4e487b710000000000000000000000000000000000000000000000000000000000000032' + ) + expect(lastJsonRpcError?.message).toBe( + 'execution reverted: out-of-bounds access of an array or bytesN' + ) + } + }) + + test('triggerCustomError', async () => { + expect.assertions(3) + try { + await env.accountWallet.readContract({ + address: errorTesterAddr, + abi: ErrorTesterAbi, + functionName: 'triggerCustomError', + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(3) + expect(lastJsonRpcError?.data).toBe( + '0x8d6ea8be0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001654686973206973206120637573746f6d206572726f7200000000000000000000' + ) + expect(lastJsonRpcError?.message).toBe('execution reverted') + } + }) + + test('eth_call (not enough funds)', async () => { + expect.assertions(3) + try { + await env.accountWallet.simulateContract({ + address: errorTesterAddr, + abi: ErrorTesterAbi, + functionName: 'valueMatch', + value: parseEther('10'), + args: [parseEther('10')], + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(-32000) + expect(lastJsonRpcError?.message).toInclude('insufficient funds') + expect(lastJsonRpcError?.data).toBeUndefined() + } + }) + + test('eth_call transfer (not enough funds)', async () => { + expect.assertions(3) + try { + await env.accountWallet.sendTransaction({ + to: '0x75E480dB528101a381Ce68544611C169Ad7EB342', + value: parseEther('10'), + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(-32000) + expect(lastJsonRpcError?.message).toInclude('insufficient funds') + expect(lastJsonRpcError?.data).toBeUndefined() + } + }) + + test('eth_estimate (not enough funds)', async () => { + expect.assertions(3) + try { + await env.accountWallet.estimateContractGas({ + address: errorTesterAddr, + abi: ErrorTesterAbi, + functionName: 'valueMatch', + value: parseEther('10'), + args: [parseEther('10')], + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(-32000) + expect(lastJsonRpcError?.message).toInclude('insufficient funds') + expect(lastJsonRpcError?.data).toBeUndefined() + } + }) + + test('eth_estimate call caller (not enough funds)', async () => { + expect.assertions(3) + try { + await env.accountWallet.estimateContractGas({ + address: errorTesterAddr, + abi: ErrorTesterAbi, + functionName: 'valueMatch', + value: parseEther('10'), + args: [parseEther('10')], + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(-32000) + expect(lastJsonRpcError?.message).toInclude('insufficient funds') + expect(lastJsonRpcError?.data).toBeUndefined() + } + }) + + test('eth_estimate (revert)', async () => { + expect.assertions(3) + try { + await env.serverWallet.estimateContractGas({ + address: errorTesterAddr, + abi: ErrorTesterAbi, + functionName: 'valueMatch', + value: parseEther('11'), + args: [parseEther('10')], + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(3) + expect(lastJsonRpcError?.message).toBe( + 'execution reverted: msg.value does not match value' + ) + expect(lastJsonRpcError?.data).toBe( + 
'0x08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001e6d73672e76616c756520646f6573206e6f74206d617463682076616c75650000' + ) + } + }) + + test('eth_get_balance (no account)', async () => { + const balance = await env.serverWallet.getBalance({ + address: '0x0000000000000000000000000000000000000123', + }) + expect(balance).toBe(0n) + }) + + test('eth_estimate (not enough funds to cover gas specified)', async () => { + expect.assertions(4) + try { + let balance = await env.serverWallet.getBalance(env.accountWallet.account) + expect(balance).toBe(0n) + + await env.accountWallet.estimateContractGas({ + address: errorTesterAddr, + abi: ErrorTesterAbi, + functionName: 'setState', + args: [true], + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(-32000) + expect(lastJsonRpcError?.message).toInclude('insufficient funds') + expect(lastJsonRpcError?.data).toBeUndefined() + } + }) + + test('eth_estimate (no gas specified)', async () => { + let balance = await env.serverWallet.getBalance(env.accountWallet.account) + expect(balance).toBe(0n) + + const data = encodeFunctionData({ + abi: ErrorTesterAbi, + functionName: 'setState', + args: [true], + }) + + await env.accountWallet.request({ + method: 'eth_estimateGas', + params: [ + { + data, + from: env.accountWallet.account.address, + to: errorTesterAddr, + }, + ], + }) + }) + + test.only('eth_estimate (no gas specified) child_call', async () => { + let balance = await env.serverWallet.getBalance(env.accountWallet.account) + expect(balance).toBe(0n) + + const data = encodeFunctionData({ + abi: FlipperCallerAbi, + functionName: 'callFlip', + }) + + await env.accountWallet.request({ + method: 'eth_estimateGas', + params: [ + { + data, + from: env.accountWallet.account.address, + to: flipperCallerAddr, + gas: `0x${Number(1000000).toString(16)}`, + }, + ], + }) + }) + }) +} diff --git a/substrate/frame/revive/rpc/examples/js/src/lib.ts b/substrate/frame/revive/rpc/examples/js/src/lib.ts index 975d8faf15b3..e1f0e780d95b 100644 --- a/substrate/frame/revive/rpc/examples/js/src/lib.ts +++ b/substrate/frame/revive/rpc/examples/js/src/lib.ts @@ -1,22 +1,11 @@ -import { - Contract, - ContractFactory, - JsonRpcProvider, - TransactionReceipt, - TransactionResponse, - Wallet, -} from 'ethers' import { readFileSync } from 'node:fs' -import type { compile } from '@parity/revive' import { spawn } from 'node:child_process' import { parseArgs } from 'node:util' -import { BaseContract } from 'ethers' - -type CompileOutput = Awaited> -type Abi = CompileOutput['contracts'][string][string]['abi'] +import { createWalletClient, defineChain, Hex, http, parseEther, publicActions } from 'viem' +import { privateKeyToAccount } from 'viem/accounts' const { - values: { geth, westend, ['private-key']: privateKey }, + values: { geth, proxy, westend, endowment, ['private-key']: privateKey }, } = parseArgs({ args: process.argv.slice(2), options: { @@ -24,6 +13,13 @@ const { type: 'string', short: 'k', }, + endowment: { + type: 'string', + short: 'e', + }, + proxy: { + type: 'boolean', + }, geth: { type: 'boolean', }, @@ -42,7 +38,7 @@ if (geth) { '--http.api', 'web3,eth,debug,personal,net', '--http.port', - '8546', + process.env.GETH_PORT ?? '8546', '--dev', '--verbosity', '0', @@ -55,56 +51,78 @@ if (geth) { await new Promise((resolve) => setTimeout(resolve, 500)) } -export const provider = new JsonRpcProvider( - westend +const rpcUrl = proxy + ? 
'http://localhost:8080' + : westend ? 'https://westend-asset-hub-eth-rpc.polkadot.io' : geth ? 'http://localhost:8546' : 'http://localhost:8545' -) -export const signer = privateKey ? new Wallet(privateKey, provider) : await provider.getSigner() -console.log(`Signer address: ${await signer.getAddress()}, Nonce: ${await signer.getNonce()}`) +export const chain = defineChain({ + id: geth ? 1337 : 420420420, + name: 'Asset Hub Westend', + network: 'asset-hub', + nativeCurrency: { + name: 'Westie', + symbol: 'WST', + decimals: 18, + }, + rpcUrls: { + default: { + http: [rpcUrl], + }, + }, + testnet: true, +}) + +const wallet = createWalletClient({ + transport: http(), + chain, +}) +const [account] = await wallet.getAddresses() +export const serverWalletClient = createWalletClient({ + account, + transport: http(), + chain, +}) + +export const walletClient = await (async () => { + if (privateKey) { + const account = privateKeyToAccount(`0x${privateKey}`) + console.log(`Wallet address ${account.address}`) + + const wallet = createWalletClient({ + account, + transport: http(), + chain, + }) + + if (endowment) { + await serverWalletClient.sendTransaction({ + to: account.address, + value: parseEther(endowment), + }) + console.log(`Endowed address ${account.address} with: ${endowment}`) + } + + return wallet.extend(publicActions) + } else { + return serverWalletClient.extend(publicActions) + } +})() /** * Get one of the pre-built contracts * @param name - the contract name */ -export function getContract(name: string): { abi: Abi; bytecode: string } { +export function getByteCode(name: string): Hex { const bytecode = geth ? readFileSync(`evm/${name}.bin`) : readFileSync(`pvm/${name}.polkavm`) - const abi = JSON.parse(readFileSync(`abi/${name}.json`, 'utf8')) as Abi - return { abi, bytecode: Buffer.from(bytecode).toString('hex') } + return `0x${Buffer.from(bytecode).toString('hex')}` } -/** - * Deploy a contract - * @returns the contract address - **/ -export async function deploy(bytecode: string, abi: Abi, args: any[] = []): Promise { - console.log('Deploying contract with', args) - const contractFactory = new ContractFactory(abi, bytecode, signer) - - const contract = await contractFactory.deploy(args) - await contract.waitForDeployment() - const address = await contract.getAddress() - console.log(`Contract deployed: ${address}`) - - return contract -} - -/** - * Call a contract - **/ -export async function call( - method: string, - address: string, - abi: Abi, - args: any[] = [], - opts: { value?: bigint } = {} -): Promise { - console.log(`Calling ${method} at ${address} with`, args, opts) - const contract = new Contract(address, abi, signer) - const tx = (await contract[method](...args, opts)) as TransactionResponse - console.log('Call transaction hash:', tx.hash) - return tx.wait() +export function assert(condition: any, message: string): asserts condition { + if (!condition) { + throw new Error(message) + } } diff --git a/substrate/frame/revive/rpc/examples/js/src/piggy-bank.ts b/substrate/frame/revive/rpc/examples/js/src/piggy-bank.ts index 7a8edbde3662..0040b0c78dc4 100644 --- a/substrate/frame/revive/rpc/examples/js/src/piggy-bank.ts +++ b/substrate/frame/revive/rpc/examples/js/src/piggy-bank.ts @@ -1,24 +1,69 @@ -import { provider, call, getContract, deploy } from './lib.ts' -import { parseEther } from 'ethers' -import { PiggyBank } from '../types/ethers-contracts/PiggyBank' +import { assert, getByteCode, walletClient } from './lib.ts' +import { abi } from '../abi/piggyBank.ts' +import { 
parseEther } from 'viem' -try { - const { abi, bytecode } = getContract('piggyBank') - const contract = (await deploy(bytecode, abi)) as PiggyBank - const address = await contract.getAddress() +const hash = await walletClient.deployContract({ + abi, + bytecode: getByteCode('piggyBank'), +}) +const deployReceipt = await walletClient.waitForTransactionReceipt({ hash }) +const contractAddress = deployReceipt.contractAddress +console.log('Contract deployed:', contractAddress) +assert(contractAddress, 'Contract address should be set') - let receipt = await call('deposit', address, abi, [], { - value: parseEther('10.0'), +// Deposit 10 WST +{ + const result = await walletClient.estimateContractGas({ + account: walletClient.account, + address: contractAddress, + abi, + functionName: 'deposit', + value: parseEther('10'), }) - console.log('Deposit receipt:', receipt?.status) - console.log(`Contract balance: ${await provider.getBalance(address)}`) - console.log('deposit: ', await contract.getDeposit()) + console.log(`Gas estimate: ${result}`) - receipt = await call('withdraw', address, abi, [parseEther('5.0')]) - console.log('Withdraw receipt:', receipt?.status) - console.log(`Contract balance: ${await provider.getBalance(address)}`) - console.log('deposit: ', await contract.getDeposit()) -} catch (err) { - console.error(err) + const { request } = await walletClient.simulateContract({ + account: walletClient.account, + address: contractAddress, + abi, + functionName: 'deposit', + value: parseEther('10'), + }) + + request.nonce = 0 + const hash = await walletClient.writeContract(request) + + const receipt = await walletClient.waitForTransactionReceipt({ hash }) + console.log(`Deposit receipt: ${receipt.status}`) + if (process.env.STOP) { + process.exit(0) + } +} + +// Withdraw 5 WST +{ + const { request } = await walletClient.simulateContract({ + account: walletClient.account, + address: contractAddress, + abi, + functionName: 'withdraw', + args: [parseEther('5')], + }) + + const hash = await walletClient.writeContract(request) + const receipt = await walletClient.waitForTransactionReceipt({ hash }) + console.log(`Withdraw receipt: ${receipt.status}`) + + // Check remaining balance + const balance = await walletClient.readContract({ + address: contractAddress, + abi, + functionName: 'getDeposit', + }) + + console.log(`Get deposit: ${balance}`) + console.log( + `Get contract balance: ${await walletClient.getBalance({ address: contractAddress })}` + ) } diff --git a/substrate/frame/revive/rpc/examples/js/src/revert.ts b/substrate/frame/revive/rpc/examples/js/src/revert.ts deleted file mode 100644 index ea1bf4eceeb9..000000000000 --- a/substrate/frame/revive/rpc/examples/js/src/revert.ts +++ /dev/null @@ -1,10 +0,0 @@ -//! 
Run with bun run script-revert.ts -import { call, getContract, deploy } from './lib.ts' - -try { - const { abi, bytecode } = getContract('revert') - const contract = await deploy(bytecode, abi) - await call('doRevert', await contract.getAddress(), abi) -} catch (err) { - console.error(err) -} diff --git a/substrate/frame/revive/rpc/examples/js/src/transfer.ts b/substrate/frame/revive/rpc/examples/js/src/transfer.ts index ae2dd50f2af8..aef9a487b0c0 100644 --- a/substrate/frame/revive/rpc/examples/js/src/transfer.ts +++ b/substrate/frame/revive/rpc/examples/js/src/transfer.ts @@ -1,17 +1,18 @@ -import { parseEther } from 'ethers' -import { provider, signer } from './lib.ts' +import { parseEther } from 'viem' +import { walletClient } from './lib.ts' const recipient = '0x75E480dB528101a381Ce68544611C169Ad7EB342' try { - console.log(`Signer balance: ${await provider.getBalance(signer.address)}`) - console.log(`Recipient balance: ${await provider.getBalance(recipient)}`) - await signer.sendTransaction({ + console.log(`Signer balance: ${await walletClient.getBalance(walletClient.account)}`) + console.log(`Recipient balance: ${await walletClient.getBalance({ address: recipient })}`) + + await walletClient.sendTransaction({ to: recipient, value: parseEther('1.0'), }) console.log(`Sent: ${parseEther('1.0')}`) - console.log(`Signer balance: ${await provider.getBalance(signer.address)}`) - console.log(`Recipient balance: ${await provider.getBalance(recipient)}`) + console.log(`Signer balance: ${await walletClient.getBalance(walletClient.account)}`) + console.log(`Recipient balance: ${await walletClient.getBalance({ address: recipient })}`) } catch (err) { console.error(err) } diff --git a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/Event.ts b/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/Event.ts deleted file mode 100644 index d65f953969f0..000000000000 --- a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/Event.ts +++ /dev/null @@ -1,117 +0,0 @@ -/* Autogenerated file. Do not edit manually. 
*/ -/* tslint:disable */ -/* eslint-disable */ -import type { - BaseContract, - BigNumberish, - BytesLike, - FunctionFragment, - Result, - Interface, - EventFragment, - AddressLike, - ContractRunner, - ContractMethod, - Listener, -} from 'ethers' -import type { - TypedContractEvent, - TypedDeferredTopicFilter, - TypedEventLog, - TypedLogDescription, - TypedListener, - TypedContractMethod, -} from './common' - -export interface EventInterface extends Interface { - getFunction(nameOrSignature: 'triggerEvent'): FunctionFragment - - getEvent(nameOrSignatureOrTopic: 'ExampleEvent'): EventFragment - - encodeFunctionData(functionFragment: 'triggerEvent', values?: undefined): string - - decodeFunctionResult(functionFragment: 'triggerEvent', data: BytesLike): Result -} - -export namespace ExampleEventEvent { - export type InputTuple = [sender: AddressLike, value: BigNumberish, message: string] - export type OutputTuple = [sender: string, value: bigint, message: string] - export interface OutputObject { - sender: string - value: bigint - message: string - } - export type Event = TypedContractEvent - export type Filter = TypedDeferredTopicFilter - export type Log = TypedEventLog - export type LogDescription = TypedLogDescription -} - -export interface Event extends BaseContract { - connect(runner?: ContractRunner | null): Event - waitForDeployment(): Promise - - interface: EventInterface - - queryFilter( - event: TCEvent, - fromBlockOrBlockhash?: string | number | undefined, - toBlock?: string | number | undefined - ): Promise>> - queryFilter( - filter: TypedDeferredTopicFilter, - fromBlockOrBlockhash?: string | number | undefined, - toBlock?: string | number | undefined - ): Promise>> - - on( - event: TCEvent, - listener: TypedListener - ): Promise - on( - filter: TypedDeferredTopicFilter, - listener: TypedListener - ): Promise - - once( - event: TCEvent, - listener: TypedListener - ): Promise - once( - filter: TypedDeferredTopicFilter, - listener: TypedListener - ): Promise - - listeners( - event: TCEvent - ): Promise>> - listeners(eventName?: string): Promise> - removeAllListeners(event?: TCEvent): Promise - - triggerEvent: TypedContractMethod<[], [void], 'nonpayable'> - - getFunction(key: string | FunctionFragment): T - - getFunction(nameOrSignature: 'triggerEvent'): TypedContractMethod<[], [void], 'nonpayable'> - - getEvent( - key: 'ExampleEvent' - ): TypedContractEvent< - ExampleEventEvent.InputTuple, - ExampleEventEvent.OutputTuple, - ExampleEventEvent.OutputObject - > - - filters: { - 'ExampleEvent(address,uint256,string)': TypedContractEvent< - ExampleEventEvent.InputTuple, - ExampleEventEvent.OutputTuple, - ExampleEventEvent.OutputObject - > - ExampleEvent: TypedContractEvent< - ExampleEventEvent.InputTuple, - ExampleEventEvent.OutputTuple, - ExampleEventEvent.OutputObject - > - } -} diff --git a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/PiggyBank.ts b/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/PiggyBank.ts deleted file mode 100644 index ca137fcc8b30..000000000000 --- a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/PiggyBank.ts +++ /dev/null @@ -1,96 +0,0 @@ -/* Autogenerated file. Do not edit manually. 
*/ -/* tslint:disable */ -/* eslint-disable */ -import type { - BaseContract, - BigNumberish, - BytesLike, - FunctionFragment, - Result, - Interface, - ContractRunner, - ContractMethod, - Listener, -} from 'ethers' -import type { - TypedContractEvent, - TypedDeferredTopicFilter, - TypedEventLog, - TypedListener, - TypedContractMethod, -} from './common' - -export interface PiggyBankInterface extends Interface { - getFunction(nameOrSignature: 'deposit' | 'getDeposit' | 'owner' | 'withdraw'): FunctionFragment - - encodeFunctionData(functionFragment: 'deposit', values?: undefined): string - encodeFunctionData(functionFragment: 'getDeposit', values?: undefined): string - encodeFunctionData(functionFragment: 'owner', values?: undefined): string - encodeFunctionData(functionFragment: 'withdraw', values: [BigNumberish]): string - - decodeFunctionResult(functionFragment: 'deposit', data: BytesLike): Result - decodeFunctionResult(functionFragment: 'getDeposit', data: BytesLike): Result - decodeFunctionResult(functionFragment: 'owner', data: BytesLike): Result - decodeFunctionResult(functionFragment: 'withdraw', data: BytesLike): Result -} - -export interface PiggyBank extends BaseContract { - connect(runner?: ContractRunner | null): PiggyBank - waitForDeployment(): Promise - - interface: PiggyBankInterface - - queryFilter( - event: TCEvent, - fromBlockOrBlockhash?: string | number | undefined, - toBlock?: string | number | undefined - ): Promise>> - queryFilter( - filter: TypedDeferredTopicFilter, - fromBlockOrBlockhash?: string | number | undefined, - toBlock?: string | number | undefined - ): Promise>> - - on( - event: TCEvent, - listener: TypedListener - ): Promise - on( - filter: TypedDeferredTopicFilter, - listener: TypedListener - ): Promise - - once( - event: TCEvent, - listener: TypedListener - ): Promise - once( - filter: TypedDeferredTopicFilter, - listener: TypedListener - ): Promise - - listeners( - event: TCEvent - ): Promise>> - listeners(eventName?: string): Promise> - removeAllListeners(event?: TCEvent): Promise - - deposit: TypedContractMethod<[], [bigint], 'payable'> - - getDeposit: TypedContractMethod<[], [bigint], 'view'> - - owner: TypedContractMethod<[], [string], 'view'> - - withdraw: TypedContractMethod<[withdrawAmount: BigNumberish], [bigint], 'nonpayable'> - - getFunction(key: string | FunctionFragment): T - - getFunction(nameOrSignature: 'deposit'): TypedContractMethod<[], [bigint], 'payable'> - getFunction(nameOrSignature: 'getDeposit'): TypedContractMethod<[], [bigint], 'view'> - getFunction(nameOrSignature: 'owner'): TypedContractMethod<[], [string], 'view'> - getFunction( - nameOrSignature: 'withdraw' - ): TypedContractMethod<[withdrawAmount: BigNumberish], [bigint], 'nonpayable'> - - filters: {} -} diff --git a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/Revert.ts b/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/Revert.ts deleted file mode 100644 index ad6e23b38a65..000000000000 --- a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/Revert.ts +++ /dev/null @@ -1,78 +0,0 @@ -/* Autogenerated file. Do not edit manually. 
*/ -/* tslint:disable */ -/* eslint-disable */ -import type { - BaseContract, - BytesLike, - FunctionFragment, - Result, - Interface, - ContractRunner, - ContractMethod, - Listener, -} from 'ethers' -import type { - TypedContractEvent, - TypedDeferredTopicFilter, - TypedEventLog, - TypedListener, - TypedContractMethod, -} from './common' - -export interface RevertInterface extends Interface { - getFunction(nameOrSignature: 'doRevert'): FunctionFragment - - encodeFunctionData(functionFragment: 'doRevert', values?: undefined): string - - decodeFunctionResult(functionFragment: 'doRevert', data: BytesLike): Result -} - -export interface Revert extends BaseContract { - connect(runner?: ContractRunner | null): Revert - waitForDeployment(): Promise - - interface: RevertInterface - - queryFilter( - event: TCEvent, - fromBlockOrBlockhash?: string | number | undefined, - toBlock?: string | number | undefined - ): Promise>> - queryFilter( - filter: TypedDeferredTopicFilter, - fromBlockOrBlockhash?: string | number | undefined, - toBlock?: string | number | undefined - ): Promise>> - - on( - event: TCEvent, - listener: TypedListener - ): Promise - on( - filter: TypedDeferredTopicFilter, - listener: TypedListener - ): Promise - - once( - event: TCEvent, - listener: TypedListener - ): Promise - once( - filter: TypedDeferredTopicFilter, - listener: TypedListener - ): Promise - - listeners( - event: TCEvent - ): Promise>> - listeners(eventName?: string): Promise> - removeAllListeners(event?: TCEvent): Promise - - doRevert: TypedContractMethod<[], [void], 'nonpayable'> - - getFunction(key: string | FunctionFragment): T - - getFunction(nameOrSignature: 'doRevert'): TypedContractMethod<[], [void], 'nonpayable'> - - filters: {} -} diff --git a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/common.ts b/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/common.ts deleted file mode 100644 index 247b9468ece2..000000000000 --- a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/common.ts +++ /dev/null @@ -1,100 +0,0 @@ -/* Autogenerated file. Do not edit manually. */ -/* tslint:disable */ -/* eslint-disable */ -import type { - FunctionFragment, - Typed, - EventFragment, - ContractTransaction, - ContractTransactionResponse, - DeferredTopicFilter, - EventLog, - TransactionRequest, - LogDescription, -} from 'ethers' - -export interface TypedDeferredTopicFilter<_TCEvent extends TypedContractEvent> - extends DeferredTopicFilter {} - -export interface TypedContractEvent< - InputTuple extends Array = any, - OutputTuple extends Array = any, - OutputObject = any, -> { - ( - ...args: Partial - ): TypedDeferredTopicFilter> - name: string - fragment: EventFragment - getFragment(...args: Partial): EventFragment -} - -type __TypechainAOutputTuple = T extends TypedContractEvent ? W : never -type __TypechainOutputObject = - T extends TypedContractEvent ? V : never - -export interface TypedEventLog extends Omit { - args: __TypechainAOutputTuple & __TypechainOutputObject -} - -export interface TypedLogDescription - extends Omit { - args: __TypechainAOutputTuple & __TypechainOutputObject -} - -export type TypedListener = ( - ...listenerArg: [...__TypechainAOutputTuple, TypedEventLog, ...undefined[]] -) => void - -export type MinEthersFactory = { - deploy(...a: ARGS[]): Promise -} - -export type GetContractTypeFromFactory = F extends MinEthersFactory ? C : never -export type GetARGsTypeFromFactory = - F extends MinEthersFactory ? 
Parameters : never - -export type StateMutability = 'nonpayable' | 'payable' | 'view' - -export type BaseOverrides = Omit -export type NonPayableOverrides = Omit -export type PayableOverrides = Omit -export type ViewOverrides = Omit -export type Overrides = S extends 'nonpayable' - ? NonPayableOverrides - : S extends 'payable' - ? PayableOverrides - : ViewOverrides - -export type PostfixOverrides, S extends StateMutability> = - | A - | [...A, Overrides] -export type ContractMethodArgs, S extends StateMutability> = PostfixOverrides< - { [I in keyof A]-?: A[I] | Typed }, - S -> - -export type DefaultReturnType = R extends Array ? R[0] : R - -// export interface ContractMethod = Array, R = any, D extends R | ContractTransactionResponse = R | ContractTransactionResponse> { -export interface TypedContractMethod< - A extends Array = Array, - R = any, - S extends StateMutability = 'payable', -> { - ( - ...args: ContractMethodArgs - ): S extends 'view' ? Promise> : Promise - - name: string - - fragment: FunctionFragment - - getFragment(...args: ContractMethodArgs): FunctionFragment - - populateTransaction(...args: ContractMethodArgs): Promise - staticCall(...args: ContractMethodArgs): Promise> - send(...args: ContractMethodArgs): Promise - estimateGas(...args: ContractMethodArgs): Promise - staticCallResult(...args: ContractMethodArgs): Promise -} diff --git a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/Event__factory.ts b/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/Event__factory.ts deleted file mode 100644 index 2e16b18a7ed8..000000000000 --- a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/Event__factory.ts +++ /dev/null @@ -1,51 +0,0 @@ -/* Autogenerated file. Do not edit manually. */ -/* tslint:disable */ -/* eslint-disable */ - -import { Contract, Interface, type ContractRunner } from 'ethers' -import type { Event, EventInterface } from '../Event' - -const _abi = [ - { - anonymous: false, - inputs: [ - { - indexed: true, - internalType: 'address', - name: 'sender', - type: 'address', - }, - { - indexed: false, - internalType: 'uint256', - name: 'value', - type: 'uint256', - }, - { - indexed: false, - internalType: 'string', - name: 'message', - type: 'string', - }, - ], - name: 'ExampleEvent', - type: 'event', - }, - { - inputs: [], - name: 'triggerEvent', - outputs: [], - stateMutability: 'nonpayable', - type: 'function', - }, -] as const - -export class Event__factory { - static readonly abi = _abi - static createInterface(): EventInterface { - return new Interface(_abi) as EventInterface - } - static connect(address: string, runner?: ContractRunner | null): Event { - return new Contract(address, _abi, runner) as unknown as Event - } -} diff --git a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/PiggyBank__factory.ts b/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/PiggyBank__factory.ts deleted file mode 100644 index 0efea80ed2dc..000000000000 --- a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/PiggyBank__factory.ts +++ /dev/null @@ -1,82 +0,0 @@ -/* Autogenerated file. Do not edit manually. 
*/ -/* tslint:disable */ -/* eslint-disable */ - -import { Contract, Interface, type ContractRunner } from 'ethers' -import type { PiggyBank, PiggyBankInterface } from '../PiggyBank' - -const _abi = [ - { - inputs: [], - stateMutability: 'nonpayable', - type: 'constructor', - }, - { - inputs: [], - name: 'deposit', - outputs: [ - { - internalType: 'uint256', - name: '', - type: 'uint256', - }, - ], - stateMutability: 'payable', - type: 'function', - }, - { - inputs: [], - name: 'getDeposit', - outputs: [ - { - internalType: 'uint256', - name: '', - type: 'uint256', - }, - ], - stateMutability: 'view', - type: 'function', - }, - { - inputs: [], - name: 'owner', - outputs: [ - { - internalType: 'address', - name: '', - type: 'address', - }, - ], - stateMutability: 'view', - type: 'function', - }, - { - inputs: [ - { - internalType: 'uint256', - name: 'withdrawAmount', - type: 'uint256', - }, - ], - name: 'withdraw', - outputs: [ - { - internalType: 'uint256', - name: 'remainingBal', - type: 'uint256', - }, - ], - stateMutability: 'nonpayable', - type: 'function', - }, -] as const - -export class PiggyBank__factory { - static readonly abi = _abi - static createInterface(): PiggyBankInterface { - return new Interface(_abi) as PiggyBankInterface - } - static connect(address: string, runner?: ContractRunner | null): PiggyBank { - return new Contract(address, _abi, runner) as unknown as PiggyBank - } -} diff --git a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/Revert__factory.ts b/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/Revert__factory.ts deleted file mode 100644 index ece1c6b5426e..000000000000 --- a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/Revert__factory.ts +++ /dev/null @@ -1,31 +0,0 @@ -/* Autogenerated file. Do not edit manually. */ -/* tslint:disable */ -/* eslint-disable */ - -import { Contract, Interface, type ContractRunner } from 'ethers' -import type { Revert, RevertInterface } from '../Revert' - -const _abi = [ - { - inputs: [], - stateMutability: 'nonpayable', - type: 'constructor', - }, - { - inputs: [], - name: 'doRevert', - outputs: [], - stateMutability: 'nonpayable', - type: 'function', - }, -] as const - -export class Revert__factory { - static readonly abi = _abi - static createInterface(): RevertInterface { - return new Interface(_abi) as RevertInterface - } - static connect(address: string, runner?: ContractRunner | null): Revert { - return new Contract(address, _abi, runner) as unknown as Revert - } -} diff --git a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/index.ts b/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/index.ts deleted file mode 100644 index 67370dba411c..000000000000 --- a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/index.ts +++ /dev/null @@ -1,6 +0,0 @@ -/* Autogenerated file. Do not edit manually. */ -/* tslint:disable */ -/* eslint-disable */ -export { Event__factory } from './Event__factory' -export { PiggyBank__factory } from './PiggyBank__factory' -export { Revert__factory } from './Revert__factory' diff --git a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/index.ts b/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/index.ts deleted file mode 100644 index 3e324e80dcb1..000000000000 --- a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/index.ts +++ /dev/null @@ -1,10 +0,0 @@ -/* Autogenerated file. Do not edit manually. 
*/ -/* tslint:disable */ -/* eslint-disable */ -export type { Event } from './Event' -export type { PiggyBank } from './PiggyBank' -export type { Revert } from './Revert' -export * as factories from './factories' -export { Event__factory } from './factories/Event__factory' -export { PiggyBank__factory } from './factories/PiggyBank__factory' -export { Revert__factory } from './factories/Revert__factory' diff --git a/substrate/frame/revive/rpc/revive_chain.metadata b/substrate/frame/revive/rpc/revive_chain.metadata index 3560b3b90407..64b1f2014dd0 100644 Binary files a/substrate/frame/revive/rpc/revive_chain.metadata and b/substrate/frame/revive/rpc/revive_chain.metadata differ diff --git a/substrate/frame/revive/rpc/src/client.rs b/substrate/frame/revive/rpc/src/client.rs index 1ca1a6d37c53..901c15e9756b 100644 --- a/substrate/frame/revive/rpc/src/client.rs +++ b/substrate/frame/revive/rpc/src/client.rs @@ -17,13 +17,12 @@ //! The client connects to the source substrate chain //! and is used by the rpc server to query and send transactions to the substrate chain. use crate::{ - rlp, runtime::GAS_PRICE, subxt_client::{ revive::{calls::types::EthTransact, events::ContractEmitted}, runtime_types::pallet_revive::storage::ContractInfo, }, - TransactionLegacySigned, LOG_TARGET, + LOG_TARGET, }; use futures::{stream, StreamExt}; use jsonrpsee::types::{error::CALL_EXECUTION_FAILED_CODE, ErrorObjectOwned}; @@ -33,7 +32,7 @@ use pallet_revive::{ Block, BlockNumberOrTag, BlockNumberOrTagOrHash, Bytes256, GenericTransaction, Log, ReceiptInfo, SyncingProgress, SyncingStatus, TransactionSigned, H160, H256, U256, }, - EthContractResult, + EthTransactError, EthTransactInfo, }; use sp_core::keccak_256; use sp_weights::Weight; @@ -117,18 +116,42 @@ fn unwrap_call_err(err: &subxt::error::RpcError) -> Option { /// Extract the revert message from a revert("msg") solidity statement. fn extract_revert_message(exec_data: &[u8]) -> Option { - let function_selector = exec_data.get(0..4)?; - - // keccak256("Error(string)") - let expected_selector = [0x08, 0xC3, 0x79, 0xA0]; - if function_selector != expected_selector { - return None; - } + let error_selector = exec_data.get(0..4)?; + + match error_selector { + // assert(false) + [0x4E, 0x48, 0x7B, 0x71] => { + let panic_code: u32 = U256::from_big_endian(exec_data.get(4..36)?).try_into().ok()?; + + // See https://docs.soliditylang.org/en/latest/control-structures.html#panic-via-assert-and-error-via-require + let msg = match panic_code { + 0x00 => "generic panic", + 0x01 => "assert(false)", + 0x11 => "arithmetic underflow or overflow", + 0x12 => "division or modulo by zero", + 0x21 => "enum overflow", + 0x22 => "invalid encoded storage byte array accessed", + 0x31 => "out-of-bounds array access; popping on an empty array", + 0x32 => "out-of-bounds access of an array or bytesN", + 0x41 => "out of memory", + 0x51 => "uninitialized function", + code => return Some(format!("execution reverted: unknown panic code: {code:#x}")), + }; - let decoded = ethabi::decode(&[ethabi::ParamType::String], &exec_data[4..]).ok()?; - match decoded.first()? 
{ - ethabi::Token::String(msg) => Some(msg.to_string()), - _ => None, + Some(format!("execution reverted: {msg}")) + }, + // revert(string) + [0x08, 0xC3, 0x79, 0xA0] => { + let decoded = ethabi::decode(&[ethabi::ParamType::String], &exec_data[4..]).ok()?; + if let Some(ethabi::Token::String(msg)) = decoded.first() { + return Some(format!("execution reverted: {msg}")) + } + Some("execution reverted".to_string()) + }, + _ => { + log::debug!(target: LOG_TARGET, "Unknown revert function selector: {error_selector:?}"); + Some("execution reverted".to_string()) + }, } } @@ -147,42 +170,46 @@ pub enum ClientError { /// A [`codec::Error`] wrapper error. #[error(transparent)] CodecError(#[from] codec::Error), - /// The dry run failed. - #[error("Dry run failed: {0}")] - DryRunFailed(String), /// Contract reverted - #[error("Execution reverted: {}", extract_revert_message(.0).unwrap_or_default())] - Reverted(Vec), + #[error("contract reverted")] + Reverted(EthTransactError), /// A decimal conversion failed. - #[error("Conversion failed")] + #[error("conversion failed")] ConversionFailed, /// The block hash was not found. - #[error("Hash not found")] + #[error("hash not found")] BlockNotFound, /// The transaction fee could not be found - #[error("TransactionFeePaid event not found")] + #[error("transactionFeePaid event not found")] TxFeeNotFound, /// The cache is empty. - #[error("Cache is empty")] + #[error("cache is empty")] CacheEmpty, } -// TODO convert error code to https://eips.ethereum.org/EIPS/eip-1474#error-codes +const REVERT_CODE: i32 = 3; impl From for ErrorObjectOwned { fn from(err: ClientError) -> Self { - let msg = err.to_string(); match err { ClientError::SubxtError(subxt::Error::Rpc(err)) | ClientError::RpcError(err) => { if let Some(err) = unwrap_call_err(&err) { return err; } - ErrorObjectOwned::owned::>(CALL_EXECUTION_FAILED_CODE, msg, None) + ErrorObjectOwned::owned::>( + CALL_EXECUTION_FAILED_CODE, + err.to_string(), + None, + ) }, - ClientError::Reverted(data) => { + ClientError::Reverted(EthTransactError::Data(data)) => { + let msg = extract_revert_message(&data).unwrap_or_default(); let data = format!("0x{}", hex::encode(data)); - ErrorObjectOwned::owned::(CALL_EXECUTION_FAILED_CODE, msg, Some(data)) + ErrorObjectOwned::owned::(REVERT_CODE, msg, Some(data)) }, - _ => ErrorObjectOwned::owned::(CALL_EXECUTION_FAILED_CODE, msg, None), + ClientError::Reverted(EthTransactError::Message(msg)) => + ErrorObjectOwned::owned::(CALL_EXECUTION_FAILED_CODE, msg, None), + _ => + ErrorObjectOwned::owned::(CALL_EXECUTION_FAILED_CODE, err.to_string(), None), } } } @@ -269,25 +296,26 @@ impl ClientInner { let extrinsics = extrinsics.iter().flat_map(|ext| { let call = ext.as_extrinsic::().ok()??; let transaction_hash = H256(keccak_256(&call.payload)); - let tx = rlp::decode::(&call.payload).ok()?; - let from = tx.recover_eth_address().ok()?; - let contract_address = if tx.transaction_legacy_unsigned.to.is_none() { - Some(create1(&from, tx.transaction_legacy_unsigned.nonce.try_into().ok()?)) + let signed_tx = TransactionSigned::decode(&call.payload).ok()?; + let from = signed_tx.recover_eth_address().ok()?; + let tx_info = GenericTransaction::from_signed(signed_tx.clone(), Some(from)); + let contract_address = if tx_info.to.is_none() { + Some(create1(&from, tx_info.nonce.unwrap_or_default().try_into().ok()?)) } else { None }; - Some((from, tx, transaction_hash, contract_address, ext)) + Some((from, signed_tx, tx_info, transaction_hash, contract_address, ext)) }); // Map each extrinsic to a 
receipt stream::iter(extrinsics) - .map(|(from, tx, transaction_hash, contract_address, ext)| async move { + .map(|(from, signed_tx, tx_info, transaction_hash, contract_address, ext)| async move { let events = ext.events().await?; let tx_fees = events.find_first::()?.ok_or(ClientError::TxFeeNotFound)?; - let gas_price = tx.transaction_legacy_unsigned.gas_price; + let gas_price = tx_info.gas_price.unwrap_or_default(); let gas_used = (tx_fees.tip.saturating_add(tx_fees.actual_fee)) .checked_div(gas_price.as_u128()) .unwrap_or_default(); @@ -324,16 +352,16 @@ impl ClientInner { contract_address, from, logs, - tx.transaction_legacy_unsigned.to, + tx_info.to, gas_price, gas_used.into(), success, transaction_hash, transaction_index.into(), - tx.transaction_legacy_unsigned.r#type.as_byte() + tx_info.r#type.unwrap_or_default() ); - Ok::<_, ClientError>((receipt.transaction_hash, (tx.into(), receipt))) + Ok::<_, ClientError>((receipt.transaction_hash, (signed_tx, receipt))) }) .buffer_unordered(10) .collect::>>() @@ -634,54 +662,25 @@ impl Client { Ok(result) } - /// Dry run a transaction and returns the [`EthContractResult`] for the transaction. + /// Dry run a transaction and returns the [`EthTransactInfo`] for the transaction. pub async fn dry_run( &self, - tx: &GenericTransaction, + tx: GenericTransaction, block: BlockNumberOrTagOrHash, - ) -> Result>, ClientError> { + ) -> Result, ClientError> { let runtime_api = self.runtime_api(&block).await?; + let payload = subxt_client::apis().revive_api().eth_transact(tx.into()); - // TODO: remove once subxt is updated - let value = subxt::utils::Static(tx.value.unwrap_or_default()); - let from = tx.from.map(|v| v.0.into()); - let to = tx.to.map(|v| v.0.into()); - - let payload = subxt_client::apis().revive_api().eth_transact( - from.unwrap_or_default(), - to, - value, - tx.input.clone().unwrap_or_default().0, - None, - None, - ); - - let EthContractResult { fee, gas_required, storage_deposit, result } = - runtime_api.call(payload).await?.0; + let result = runtime_api.call(payload).await?; match result { Err(err) => { log::debug!(target: LOG_TARGET, "Dry run failed {err:?}"); - Err(ClientError::DryRunFailed(format!("{err:?}"))) + Err(ClientError::Reverted(err.0)) }, - Ok(result) if result.did_revert() => { - log::debug!(target: LOG_TARGET, "Dry run reverted"); - Err(ClientError::Reverted(result.0.data)) - }, - Ok(result) => - Ok(EthContractResult { fee, gas_required, storage_deposit, result: result.0.data }), + Ok(result) => Ok(result.0), } } - /// Dry run a transaction and returns the gas estimate for the transaction. - pub async fn estimate_gas( - &self, - tx: &GenericTransaction, - block: BlockNumberOrTagOrHash, - ) -> Result { - let dry_run = self.dry_run(tx, block).await?; - Ok(U256::from(dry_run.fee / GAS_PRICE as u128) + GAS_PRICE) - } - /// Get the nonce of the given address. pub async fn nonce( &self, diff --git a/substrate/frame/revive/rpc/src/example.rs b/substrate/frame/revive/rpc/src/example.rs index 20f00465b146..3b9a33296ef4 100644 --- a/substrate/frame/revive/rpc/src/example.rs +++ b/substrate/frame/revive/rpc/src/example.rs @@ -20,8 +20,7 @@ use crate::{EthRpcClient, ReceiptInfo}; use anyhow::Context; use pallet_revive::evm::{ - rlp::*, Account, BlockTag, Bytes, GenericTransaction, TransactionLegacyUnsigned, H160, H256, - U256, + Account, BlockTag, Bytes, GenericTransaction, TransactionLegacyUnsigned, H160, H256, U256, }; /// Wait for a transaction receipt. 
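For orientation on the reworked `extract_revert_message` above: the two 4-byte selectors it matches are the standard Solidity encodings for `revert("msg")` and `assert`/arithmetic panics. Below is a minimal, self-contained sketch (assuming only the `ethabi` crate, which the client already depends on); the `"boom"` payload and the assertions are illustrative, not part of this PR.

```rust
// `revert("boom")` produces keccak256("Error(string)")[..4] followed by the
// ABI-encoded message; `assert(false)` produces keccak256("Panic(uint256)")[..4]
// followed by a 32-byte panic code (0x01 for assert).
fn main() {
    // Build the revert data exactly as Solidity would emit it.
    let mut exec_data = vec![0x08, 0xC3, 0x79, 0xA0];
    exec_data.extend(ethabi::encode(&[ethabi::Token::String("boom".into())]));

    // Decoding mirrors the `[0x08, 0xC3, 0x79, 0xA0]` arm above.
    let tokens = ethabi::decode(&[ethabi::ParamType::String], &exec_data[4..]).unwrap();
    if let Some(ethabi::Token::String(msg)) = tokens.first() {
        assert_eq!(format!("execution reverted: {msg}"), "execution reverted: boom");
    }

    // A panic would instead start with the `[0x4E, 0x48, 0x7B, 0x71]` selector
    // handled by the first match arm.
}
```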
@@ -169,11 +168,11 @@ impl TransactionBuilder { mutate(&mut unsigned_tx); - let tx = signer.sign_transaction(unsigned_tx.clone()); - let bytes = tx.rlp_bytes().to_vec(); + let tx = signer.sign_transaction(unsigned_tx.into()); + let bytes = tx.signed_payload(); let hash = client - .send_raw_transaction(bytes.clone().into()) + .send_raw_transaction(bytes.into()) .await .with_context(|| "transaction failed")?; diff --git a/substrate/frame/revive/rpc/src/lib.rs b/substrate/frame/revive/rpc/src/lib.rs index 8d9d6fab829e..ccd8bb043e90 100644 --- a/substrate/frame/revive/rpc/src/lib.rs +++ b/substrate/frame/revive/rpc/src/lib.rs @@ -23,7 +23,7 @@ use jsonrpsee::{ core::{async_trait, RpcResult}, types::{ErrorCode, ErrorObjectOwned}, }; -use pallet_revive::{evm::*, EthContractResult}; +use pallet_revive::evm::*; use sp_core::{keccak_256, H160, H256, U256}; use thiserror::Error; @@ -128,16 +128,28 @@ impl EthRpcServer for EthRpcServerImpl { async fn estimate_gas( &self, transaction: GenericTransaction, - _block: Option, + block: Option, ) -> RpcResult { - let result = self.client.estimate_gas(&transaction, BlockTag::Latest.into()).await?; - Ok(result) + let dry_run = self.client.dry_run(transaction, block.unwrap_or_default().into()).await?; + Ok(dry_run.eth_gas) + } + + async fn call( + &self, + transaction: GenericTransaction, + block: Option, + ) -> RpcResult { + let dry_run = self + .client + .dry_run(transaction, block.unwrap_or_else(|| BlockTag::Latest.into())) + .await?; + Ok(dry_run.data.into()) } async fn send_raw_transaction(&self, transaction: Bytes) -> RpcResult { let hash = H256(keccak_256(&transaction.0)); - let tx = rlp::decode::(&transaction.0).map_err(|err| { + let tx = TransactionSigned::decode(&transaction.0).map_err(|err| { log::debug!(target: LOG_TARGET, "Failed to decode transaction: {err:?}"); EthRpcError::from(err) })?; @@ -147,38 +159,28 @@ impl EthRpcServer for EthRpcServerImpl { EthRpcError::InvalidSignature })?; + let tx = GenericTransaction::from_signed(tx, Some(eth_addr)); + // Dry run the transaction to get the weight limit and storage deposit limit - let TransactionLegacyUnsigned { to, input, value, .. } = tx.transaction_legacy_unsigned; - let dry_run = self - .client - .dry_run( - &GenericTransaction { - from: Some(eth_addr), - input: Some(input.clone()), - to, - value: Some(value), - ..Default::default() - }, - BlockTag::Latest.into(), - ) - .await?; + let dry_run = self.client.dry_run(tx, BlockTag::Latest.into()).await?; - let EthContractResult { gas_required, storage_deposit, .. } = dry_run; let call = subxt_client::tx().revive().eth_transact( transaction.0, - gas_required.into(), - storage_deposit, + dry_run.gas_required.into(), + dry_run.storage_deposit, ); - self.client.submit(call).await?; + self.client.submit(call).await.map_err(|err| { + log::debug!(target: LOG_TARGET, "submit call failed: {err:?}"); + err + })?; log::debug!(target: LOG_TARGET, "send_raw_transaction hash: {hash:?}"); Ok(hash) } - async fn send_transaction(&self, transaction: GenericTransaction) -> RpcResult { + async fn send_transaction(&self, mut transaction: GenericTransaction) -> RpcResult { log::debug!(target: LOG_TARGET, "{transaction:#?}"); - let GenericTransaction { from, gas, gas_price, input, to, value, r#type, .. 
} = transaction; - let Some(from) = from else { + let Some(from) = transaction.from else { log::debug!(target: LOG_TARGET, "Transaction must have a sender"); return Err(EthRpcError::InvalidTransaction.into()); }; @@ -189,27 +191,26 @@ impl EthRpcServer for EthRpcServerImpl { .find(|account| account.address() == from) .ok_or(EthRpcError::AccountNotFound(from))?; - let gas_price = gas_price.unwrap_or_else(|| U256::from(GAS_PRICE)); - let chain_id = Some(self.client.chain_id().into()); - let input = input.unwrap_or_default(); - let value = value.unwrap_or_default(); - let r#type = r#type.unwrap_or_default(); + if transaction.gas.is_none() { + transaction.gas = Some(self.estimate_gas(transaction.clone(), None).await?); + } - let Some(gas) = gas else { - log::debug!(target: LOG_TARGET, "Transaction must have a gas limit"); - return Err(EthRpcError::InvalidTransaction.into()); - }; + if transaction.gas_price.is_none() { + transaction.gas_price = Some(self.gas_price().await?); + } - let r#type = Type0::try_from_byte(r#type.clone()) - .map_err(|_| EthRpcError::TransactionTypeNotSupported(r#type))?; + if transaction.nonce.is_none() { + transaction.nonce = + Some(self.get_transaction_count(from, BlockTag::Latest.into()).await?); + } - let nonce = self.get_transaction_count(from, BlockTag::Latest.into()).await?; + if transaction.chain_id.is_none() { + transaction.chain_id = Some(self.chain_id().await?); + } - let tx = - TransactionLegacyUnsigned { chain_id, gas, gas_price, input, nonce, to, value, r#type }; - let tx = account.sign_transaction(tx); - let rlp_bytes = rlp::encode(&tx).to_vec(); - self.send_raw_transaction(Bytes(rlp_bytes)).await + let tx = transaction.try_into_unsigned().map_err(|_| EthRpcError::InvalidTransaction)?; + let payload = account.sign_transaction(tx).signed_payload(); + self.send_raw_transaction(Bytes(payload)).await } async fn get_block_by_hash( @@ -247,18 +248,6 @@ impl EthRpcServer for EthRpcServerImpl { Ok(self.accounts.iter().map(|account| account.address()).collect()) } - async fn call( - &self, - transaction: GenericTransaction, - block: Option, - ) -> RpcResult { - let dry_run = self - .client - .dry_run(&transaction, block.unwrap_or_else(|| BlockTag::Latest.into())) - .await?; - Ok(dry_run.result.into()) - } - async fn get_block_by_number( &self, block: BlockNumberOrTag, diff --git a/substrate/frame/revive/rpc/src/rpc_methods_gen.rs b/substrate/frame/revive/rpc/src/rpc_methods_gen.rs index 339080368969..ad34dbfdfb49 100644 --- a/substrate/frame/revive/rpc/src/rpc_methods_gen.rs +++ b/substrate/frame/revive/rpc/src/rpc_methods_gen.rs @@ -14,6 +14,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. + //! Generated JSON-RPC methods. 
#![allow(missing_docs)] diff --git a/substrate/frame/revive/rpc/src/subxt_client.rs b/substrate/frame/revive/rpc/src/subxt_client.rs index a232b231bc7c..1e1c395028a4 100644 --- a/substrate/frame/revive/rpc/src/subxt_client.rs +++ b/substrate/frame/revive/rpc/src/subxt_client.rs @@ -27,8 +27,16 @@ use subxt::config::{signed_extensions, Config, PolkadotConfig}; with = "::subxt::utils::Static<::sp_core::U256>" ), substitute_type( - path = "pallet_revive::primitives::EthContractResult", - with = "::subxt::utils::Static<::pallet_revive::EthContractResult>" + path = "pallet_revive::evm::api::rpc_types_gen::GenericTransaction", + with = "::subxt::utils::Static<::pallet_revive::evm::GenericTransaction>" + ), + substitute_type( + path = "pallet_revive::primitives::EthTransactInfo", + with = "::subxt::utils::Static<::pallet_revive::EthTransactInfo>" + ), + substitute_type( + path = "pallet_revive::primitives::EthTransactError", + with = "::subxt::utils::Static<::pallet_revive::EthTransactError>" ), substitute_type( path = "pallet_revive::primitives::ExecReturnValue", diff --git a/substrate/frame/revive/rpc/src/tests.rs b/substrate/frame/revive/rpc/src/tests.rs index eb23bd7583a0..43b600c33d78 100644 --- a/substrate/frame/revive/rpc/src/tests.rs +++ b/substrate/frame/revive/rpc/src/tests.rs @@ -32,9 +32,9 @@ use static_init::dynamic; use std::thread; use substrate_cli_test_utils::*; -/// Create a websocket client with a 30s timeout. +/// Create a websocket client with a 120s timeout. async fn ws_client_with_retry(url: &str) -> WsClient { - let timeout = tokio::time::Duration::from_secs(30); + let timeout = tokio::time::Duration::from_secs(120); tokio::time::timeout(timeout, async { loop { if let Ok(client) = WsClientBuilder::default().build(url).await { @@ -222,21 +222,22 @@ async fn deploy_and_call() -> anyhow::Result<()> { async fn revert_call() -> anyhow::Result<()> { let _lock = SHARED_RESOURCES.write(); let client = SharedResources::client().await; - let (bytecode, contract) = get_contract("revert")?; + let (bytecode, contract) = get_contract("ErrorTester")?; let receipt = TransactionBuilder::default() - .input(contract.constructor.clone().unwrap().encode_input(bytecode, &[]).unwrap()) + .input(bytecode) .send_and_wait_for_receipt(&client) .await?; let err = TransactionBuilder::default() .to(receipt.contract_address.unwrap()) - .input(contract.function("doRevert")?.encode_input(&[])?.to_vec()) + .input(contract.function("triggerRequireError")?.encode_input(&[])?.to_vec()) .send(&client) .await .unwrap_err(); let call_err = unwrap_call_err!(err.source().unwrap()); - assert_eq!(call_err.message(), "Execution reverted: revert message"); + assert_eq!(call_err.message(), "execution reverted: This is a require error"); + assert_eq!(call_err.code(), 3); Ok(()) } @@ -244,7 +245,7 @@ async fn revert_call() -> anyhow::Result<()> { async fn event_logs() -> anyhow::Result<()> { let _lock = SHARED_RESOURCES.write(); let client = SharedResources::client().await; - let (bytecode, contract) = get_contract("event")?; + let (bytecode, contract) = get_contract("EventExample")?; let receipt = TransactionBuilder::default() .input(bytecode) .send_and_wait_for_receipt(&client) @@ -283,7 +284,7 @@ async fn invalid_transaction() -> anyhow::Result<()> { async fn native_evm_ratio_works() -> anyhow::Result<()> { let _lock = SHARED_RESOURCES.write(); let client = SharedResources::client().await; - let (bytecode, contract) = get_contract("piggyBank")?; + let (bytecode, contract) = get_contract("PiggyBank")?; let 
contract_address = TransactionBuilder::default() .input(bytecode) .send_and_wait_for_receipt(&client) diff --git a/substrate/frame/revive/src/benchmarking/call_builder.rs b/substrate/frame/revive/src/benchmarking/call_builder.rs index c666383abb2f..1177d47aadc3 100644 --- a/substrate/frame/revive/src/benchmarking/call_builder.rs +++ b/substrate/frame/revive/src/benchmarking/call_builder.rs @@ -21,7 +21,7 @@ use crate::{ exec::{ExportedFunction, Ext, Key, Stack}, storage::meter::Meter, transient_storage::MeterEntry, - wasm::{ApiVersion, PreparedCall, Runtime}, + wasm::{PreparedCall, Runtime}, BalanceOf, Config, DebugBuffer, Error, GasMeter, MomentOf, Origin, WasmBlob, Weight, }; use alloc::{vec, vec::Vec}; @@ -45,7 +45,7 @@ pub struct CallSetup { impl Default for CallSetup where - T: Config + pallet_balances::Config, + T: Config, BalanceOf: Into + TryFrom, MomentOf: Into, T::Hash: frame_support::traits::IsType, @@ -57,7 +57,7 @@ where impl CallSetup where - T: Config + pallet_balances::Config, + T: Config, BalanceOf: Into + TryFrom, MomentOf: Into, T::Hash: frame_support::traits::IsType, @@ -164,13 +164,7 @@ where module: WasmBlob, input: Vec, ) -> PreparedCall<'a, StackExt<'a, T>> { - module - .prepare_call( - Runtime::new(ext, input), - ExportedFunction::Call, - ApiVersion::UnsafeNewest, - ) - .unwrap() + module.prepare_call(Runtime::new(ext, input), ExportedFunction::Call).unwrap() } /// Add transient_storage diff --git a/substrate/frame/revive/src/benchmarking/mod.rs b/substrate/frame/revive/src/benchmarking/mod.rs index 9c4d817a07de..1fb4d7ab58a4 100644 --- a/substrate/frame/revive/src/benchmarking/mod.rs +++ b/substrate/frame/revive/src/benchmarking/mod.rs @@ -34,11 +34,10 @@ use frame_benchmarking::v2::*; use frame_support::{ self, assert_ok, storage::child, - traits::{fungible::InspectHold, Currency}, + traits::fungible::InspectHold, weights::{Weight, WeightMeter}, }; use frame_system::RawOrigin; -use pallet_balances; use pallet_revive_uapi::{CallFlags, ReturnErrorCode, StorageFlags}; use sp_runtime::traits::{Bounded, Hash}; @@ -68,7 +67,7 @@ struct Contract { impl Contract where - T: Config + pallet_balances::Config, + T: Config, BalanceOf: Into + TryFrom, MomentOf: Into, T::Hash: frame_support::traits::IsType, @@ -103,7 +102,7 @@ where origin, 0u32.into(), Weight::MAX, - default_deposit_limit::(), + DepositLimit::Balance(default_deposit_limit::()), Code::Upload(module.code), data, salt, @@ -220,11 +219,10 @@ fn default_deposit_limit() -> BalanceOf { #[benchmarks( where BalanceOf: Into + TryFrom, - T: Config + pallet_balances::Config, + T: Config, MomentOf: Into, ::RuntimeEvent: From>, ::RuntimeCall: From>, - as Currency>::Balance: From>, ::Hash: frame_support::traits::IsType, )] mod benchmarks { @@ -773,6 +771,21 @@ mod benchmarks { assert_eq!(U256::from_little_endian(&memory[..]), runtime.ext().minimum_balance()); } + #[benchmark(pov_mode = Measured)] + fn seal_call_data_size() { + let mut setup = CallSetup::::default(); + let (mut ext, _) = setup.ext(); + let mut runtime = crate::wasm::Runtime::new(&mut ext, vec![42u8; 128 as usize]); + let mut memory = memory!(vec![0u8; 32 as usize],); + let result; + #[block] + { + result = runtime.bench_call_data_size(memory.as_mut_slice(), 0); + } + assert_ok!(result); + assert_eq!(U256::from_little_endian(&memory[..]), U256::from(128)); + } + #[benchmark(pov_mode = Measured)] fn seal_block_number() { build_runtime!(runtime, memory: [[0u8;32], ]); @@ -840,6 +853,21 @@ mod benchmarks { assert_eq!(U256::from_little_endian(&memory[..]), 
runtime.ext().get_weight_price(weight)); } + #[benchmark(pov_mode = Measured)] + fn seal_call_data_load() { + let mut setup = CallSetup::::default(); + let (mut ext, _) = setup.ext(); + let mut runtime = crate::wasm::Runtime::new(&mut ext, vec![42u8; 32]); + let mut memory = memory!(vec![0u8; 32],); + let result; + #[block] + { + result = runtime.bench_call_data_load(memory.as_mut_slice(), 0, 0); + } + assert_ok!(result); + assert_eq!(&memory[..], &vec![42u8; 32]); + } + #[benchmark(pov_mode = Measured)] fn seal_input(n: Linear<0, { limits::code::BLOB_BYTES - 4 }>) { let mut setup = CallSetup::::default(); diff --git a/substrate/frame/revive/src/chain_extension.rs b/substrate/frame/revive/src/chain_extension.rs index ccea12945054..5b3e886a5628 100644 --- a/substrate/frame/revive/src/chain_extension.rs +++ b/substrate/frame/revive/src/chain_extension.rs @@ -75,7 +75,7 @@ use crate::{ Error, }; use alloc::vec::Vec; -use codec::{Decode, MaxEncodedLen}; +use codec::Decode; use frame_support::weights::Weight; use sp_runtime::DispatchError; @@ -304,16 +304,6 @@ impl<'a, 'b, E: Ext, M: ?Sized + Memory> Environment<'a, 'b, E, M> { Ok(()) } - /// Reads and decodes a type with a size fixed at compile time from contract memory. - /// - /// This function is secure and recommended for all input types of fixed size - /// as long as the cost of reading the memory is included in the overall already charged - /// weight of the chain extension. This should usually be the case when fixed input types - /// are used. - pub fn read_as(&mut self) -> Result { - self.memory.read_as(self.input_ptr) - } - /// Reads and decodes a type with a dynamic size from contract memory. /// /// Make sure to include `len` in your weight calculations. diff --git a/substrate/frame/revive/src/evm/api.rs b/substrate/frame/revive/src/evm/api.rs index 8185a2c8f6f8..fe18c8735bed 100644 --- a/substrate/frame/revive/src/evm/api.rs +++ b/substrate/frame/revive/src/evm/api.rs @@ -25,9 +25,7 @@ pub use rlp; mod type_id; pub use type_id::*; -#[cfg(feature = "std")] mod rpc_types; - mod rpc_types_gen; pub use rpc_types_gen::*; diff --git a/substrate/frame/revive/src/evm/api/account.rs b/substrate/frame/revive/src/evm/api/account.rs index 8365ebf83cae..ba1c68ea0cf7 100644 --- a/substrate/frame/revive/src/evm/api/account.rs +++ b/substrate/frame/revive/src/evm/api/account.rs @@ -16,10 +16,9 @@ // limitations under the License. //! Utilities for working with Ethereum accounts. use crate::{ - evm::{TransactionLegacySigned, TransactionLegacyUnsigned}, + evm::{TransactionSigned, TransactionUnsigned}, H160, }; -use rlp::Encodable; use sp_runtime::AccountId32; /// A simple account that can sign transactions @@ -38,6 +37,11 @@ impl From for Account { } impl Account { + /// Create a new account from a secret + pub fn from_secret_key(secret_key: [u8; 32]) -> Self { + subxt_signer::eth::Keypair::from_secret_key(secret_key).unwrap().into() + } + /// Get the [`H160`] address of the account. pub fn address(&self) -> H160 { H160::from_slice(&self.0.public_key().to_account_id().as_ref()) @@ -52,9 +56,21 @@ impl Account { } /// Sign a transaction. 
- pub fn sign_transaction(&self, tx: TransactionLegacyUnsigned) -> TransactionLegacySigned { - let rlp_encoded = tx.rlp_bytes(); - let signature = self.0.sign(&rlp_encoded); - TransactionLegacySigned::from(tx, signature.as_ref()) + pub fn sign_transaction(&self, tx: TransactionUnsigned) -> TransactionSigned { + let payload = tx.unsigned_payload(); + let signature = self.0.sign(&payload).0; + tx.with_signature(signature) } } + +#[test] +fn from_secret_key_works() { + let account = Account::from_secret_key(hex_literal::hex!( + "a872f6cbd25a0e04a08b1e21098017a9e6194d101d75e13111f71410c59cd57f" + )); + + assert_eq!( + account.address(), + H160::from(hex_literal::hex!("75e480db528101a381ce68544611c169ad7eb342")) + ) +} diff --git a/substrate/frame/revive/src/evm/api/rlp_codec.rs b/substrate/frame/revive/src/evm/api/rlp_codec.rs index e5f24c28a482..9b61cd042ec5 100644 --- a/substrate/frame/revive/src/evm/api/rlp_codec.rs +++ b/substrate/frame/revive/src/evm/api/rlp_codec.rs @@ -21,17 +21,84 @@ use super::*; use alloc::vec::Vec; use rlp::{Decodable, Encodable}; -impl TransactionLegacyUnsigned { - /// Get the rlp encoded bytes of a signed transaction with a dummy 65 bytes signature. - pub fn dummy_signed_payload(&self) -> Vec { +impl TransactionUnsigned { + /// Return the bytes to be signed by the private key. + pub fn unsigned_payload(&self) -> Vec { + use TransactionUnsigned::*; let mut s = rlp::RlpStream::new(); - s.append(self); - const DUMMY_SIGNATURE: [u8; 65] = [0u8; 65]; - s.append_raw(&DUMMY_SIGNATURE.as_ref(), 1); + match self { + Transaction2930Unsigned(ref tx) => { + s.append(&tx.r#type.value()); + s.append(tx); + }, + Transaction1559Unsigned(ref tx) => { + s.append(&tx.r#type.value()); + s.append(tx); + }, + Transaction4844Unsigned(ref tx) => { + s.append(&tx.r#type.value()); + s.append(tx); + }, + TransactionLegacyUnsigned(ref tx) => { + s.append(tx); + }, + } + s.out().to_vec() } } +impl TransactionSigned { + /// Encode the Ethereum transaction into bytes. + pub fn signed_payload(&self) -> Vec { + use TransactionSigned::*; + let mut s = rlp::RlpStream::new(); + match self { + Transaction2930Signed(ref tx) => { + s.append(&tx.transaction_2930_unsigned.r#type.value()); + s.append(tx); + }, + Transaction1559Signed(ref tx) => { + s.append(&tx.transaction_1559_unsigned.r#type.value()); + s.append(tx); + }, + Transaction4844Signed(ref tx) => { + s.append(&tx.transaction_4844_unsigned.r#type.value()); + s.append(tx); + }, + TransactionLegacySigned(ref tx) => { + s.append(tx); + }, + } + + s.out().to_vec() + } + + /// Decode the Ethereum transaction from bytes. + pub fn decode(data: &[u8]) -> Result { + if data.len() < 1 { + return Err(rlp::DecoderError::RlpIsTooShort); + } + match data[0] { + TYPE_EIP2930 => rlp::decode::(&data[1..]).map(Into::into), + TYPE_EIP1559 => rlp::decode::(&data[1..]).map(Into::into), + TYPE_EIP4844 => rlp::decode::(&data[1..]).map(Into::into), + _ => rlp::decode::(data).map(Into::into), + } + } +} + +impl TransactionUnsigned { + /// Get a signed transaction payload with a dummy 65 bytes signature. 
+ pub fn dummy_signed_payload(&self) -> Vec { + const DUMMY_SIGNATURE: [u8; 65] = [0u8; 65]; + self.unsigned_payload() + .into_iter() + .chain(DUMMY_SIGNATURE.iter().copied()) + .collect::>() + } +} + /// See impl Encodable for TransactionLegacyUnsigned { fn rlp_append(&self, s: &mut rlp::RlpStream) { @@ -47,8 +114,8 @@ impl Encodable for TransactionLegacyUnsigned { s.append(&self.value); s.append(&self.input.0); s.append(&chain_id); - s.append(&0_u8); - s.append(&0_u8); + s.append(&0u8); + s.append(&0u8); } else { s.begin_list(6); s.append(&self.nonce); @@ -64,7 +131,6 @@ impl Encodable for TransactionLegacyUnsigned { } } -/// See impl Decodable for TransactionLegacyUnsigned { fn decode(rlp: &rlp::Rlp) -> Result { Ok(TransactionLegacyUnsigned { @@ -95,16 +161,18 @@ impl Decodable for TransactionLegacyUnsigned { impl Encodable for TransactionLegacySigned { fn rlp_append(&self, s: &mut rlp::RlpStream) { + let tx = &self.transaction_legacy_unsigned; + s.begin_list(9); - s.append(&self.transaction_legacy_unsigned.nonce); - s.append(&self.transaction_legacy_unsigned.gas_price); - s.append(&self.transaction_legacy_unsigned.gas); - match self.transaction_legacy_unsigned.to { + s.append(&tx.nonce); + s.append(&tx.gas_price); + s.append(&tx.gas); + match tx.to { Some(ref to) => s.append(to), None => s.append_empty_data(), }; - s.append(&self.transaction_legacy_unsigned.value); - s.append(&self.transaction_legacy_unsigned.input.0); + s.append(&tx.value); + s.append(&tx.input.0); s.append(&self.v); s.append(&self.r); @@ -112,6 +180,232 @@ impl Encodable for TransactionLegacySigned { } } +impl Encodable for AccessListEntry { + fn rlp_append(&self, s: &mut rlp::RlpStream) { + s.begin_list(2); + s.append(&self.address); + s.append_list(&self.storage_keys); + } +} + +impl Decodable for AccessListEntry { + fn decode(rlp: &rlp::Rlp) -> Result { + Ok(AccessListEntry { address: rlp.val_at(0)?, storage_keys: rlp.list_at(1)? }) + } +} + +/// See +impl Encodable for Transaction1559Unsigned { + fn rlp_append(&self, s: &mut rlp::RlpStream) { + s.begin_list(9); + s.append(&self.chain_id); + s.append(&self.nonce); + s.append(&self.max_priority_fee_per_gas); + s.append(&self.max_fee_per_gas); + s.append(&self.gas); + match self.to { + Some(ref to) => s.append(to), + None => s.append_empty_data(), + }; + s.append(&self.value); + s.append(&self.input.0); + s.append_list(&self.access_list); + } +} + +/// See +impl Encodable for Transaction1559Signed { + fn rlp_append(&self, s: &mut rlp::RlpStream) { + let tx = &self.transaction_1559_unsigned; + s.begin_list(12); + s.append(&tx.chain_id); + s.append(&tx.nonce); + s.append(&tx.max_priority_fee_per_gas); + s.append(&tx.max_fee_per_gas); + s.append(&tx.gas); + match tx.to { + Some(ref to) => s.append(to), + None => s.append_empty_data(), + }; + s.append(&tx.value); + s.append(&tx.input.0); + s.append_list(&tx.access_list); + + s.append(&self.y_parity); + s.append(&self.r); + s.append(&self.s); + } +} + +impl Decodable for Transaction1559Signed { + fn decode(rlp: &rlp::Rlp) -> Result { + Ok(Transaction1559Signed { + transaction_1559_unsigned: { + Transaction1559Unsigned { + chain_id: rlp.val_at(0)?, + nonce: rlp.val_at(1)?, + max_priority_fee_per_gas: rlp.val_at(2)?, + max_fee_per_gas: rlp.val_at(3)?, + gas: rlp.val_at(4)?, + to: { + let to = rlp.at(5)?; + if to.is_empty() { + None + } else { + Some(to.as_val()?) 
+ } + }, + value: rlp.val_at(6)?, + input: Bytes(rlp.val_at(7)?), + access_list: rlp.list_at(8)?, + ..Default::default() + } + }, + y_parity: rlp.val_at(9)?, + r: rlp.val_at(10)?, + s: rlp.val_at(11)?, + ..Default::default() + }) + } +} + +//See https://eips.ethereum.org/EIPS/eip-2930 +impl Encodable for Transaction2930Unsigned { + fn rlp_append(&self, s: &mut rlp::RlpStream) { + s.begin_list(8); + s.append(&self.chain_id); + s.append(&self.nonce); + s.append(&self.gas_price); + s.append(&self.gas); + match self.to { + Some(ref to) => s.append(to), + None => s.append_empty_data(), + }; + s.append(&self.value); + s.append(&self.input.0); + s.append_list(&self.access_list); + } +} + +//See https://eips.ethereum.org/EIPS/eip-2930 +impl Encodable for Transaction2930Signed { + fn rlp_append(&self, s: &mut rlp::RlpStream) { + let tx = &self.transaction_2930_unsigned; + s.begin_list(11); + s.append(&tx.chain_id); + s.append(&tx.nonce); + s.append(&tx.gas_price); + s.append(&tx.gas); + match tx.to { + Some(ref to) => s.append(to), + None => s.append_empty_data(), + }; + s.append(&tx.value); + s.append(&tx.input.0); + s.append_list(&tx.access_list); + s.append(&self.y_parity); + s.append(&self.r); + s.append(&self.s); + } +} + +impl Decodable for Transaction2930Signed { + fn decode(rlp: &rlp::Rlp) -> Result { + Ok(Transaction2930Signed { + transaction_2930_unsigned: { + Transaction2930Unsigned { + chain_id: rlp.val_at(0)?, + nonce: rlp.val_at(1)?, + gas_price: rlp.val_at(2)?, + gas: rlp.val_at(3)?, + to: { + let to = rlp.at(4)?; + if to.is_empty() { + None + } else { + Some(to.as_val()?) + } + }, + value: rlp.val_at(5)?, + input: Bytes(rlp.val_at(6)?), + access_list: rlp.list_at(7)?, + ..Default::default() + } + }, + y_parity: rlp.val_at(8)?, + r: rlp.val_at(9)?, + s: rlp.val_at(10)?, + ..Default::default() + }) + } +} + +//See https://eips.ethereum.org/EIPS/eip-4844 +impl Encodable for Transaction4844Unsigned { + fn rlp_append(&self, s: &mut rlp::RlpStream) { + s.begin_list(11); + s.append(&self.chain_id); + s.append(&self.nonce); + s.append(&self.max_priority_fee_per_gas); + s.append(&self.max_fee_per_gas); + s.append(&self.gas); + s.append(&self.to); + s.append(&self.value); + s.append(&self.input.0); + s.append_list(&self.access_list); + s.append(&self.max_fee_per_blob_gas); + s.append_list(&self.blob_versioned_hashes); + } +} + +//See https://eips.ethereum.org/EIPS/eip-4844 +impl Encodable for Transaction4844Signed { + fn rlp_append(&self, s: &mut rlp::RlpStream) { + let tx = &self.transaction_4844_unsigned; + s.begin_list(14); + s.append(&tx.chain_id); + s.append(&tx.nonce); + s.append(&tx.max_priority_fee_per_gas); + s.append(&tx.max_fee_per_gas); + s.append(&tx.gas); + s.append(&tx.to); + s.append(&tx.value); + s.append(&tx.input.0); + s.append_list(&tx.access_list); + s.append(&tx.max_fee_per_blob_gas); + s.append_list(&tx.blob_versioned_hashes); + s.append(&self.y_parity); + s.append(&self.r); + s.append(&self.s); + } +} + +impl Decodable for Transaction4844Signed { + fn decode(rlp: &rlp::Rlp) -> Result { + Ok(Transaction4844Signed { + transaction_4844_unsigned: { + Transaction4844Unsigned { + chain_id: rlp.val_at(0)?, + nonce: rlp.val_at(1)?, + max_priority_fee_per_gas: rlp.val_at(2)?, + max_fee_per_gas: rlp.val_at(3)?, + gas: rlp.val_at(4)?, + to: rlp.val_at(5)?, + value: rlp.val_at(6)?, + input: Bytes(rlp.val_at(7)?), + access_list: rlp.list_at(8)?, + max_fee_per_blob_gas: rlp.val_at(9)?, + blob_versioned_hashes: rlp.list_at(10)?, + ..Default::default() + } + }, + y_parity: 
rlp.val_at(11)?, + r: rlp.val_at(12)?, + s: rlp.val_at(13)?, + }) + } +} + /// See impl Decodable for TransactionLegacySigned { fn decode(rlp: &rlp::Rlp) -> Result { @@ -142,7 +436,7 @@ impl Decodable for TransactionLegacySigned { value: rlp.val_at(4)?, input: Bytes(rlp.val_at(5)?), chain_id: extract_chain_id(v).map(|v| v.into()), - r#type: Type0 {}, + r#type: TypeLegacy {}, } }, v, @@ -157,31 +451,123 @@ mod test { use super::*; #[test] - fn encode_decode_legacy_transaction_works() { - let tx = TransactionLegacyUnsigned { - chain_id: Some(596.into()), - gas: U256::from(21000), - nonce: U256::from(1), - gas_price: U256::from("0x640000006a"), - to: Some(Account::from(subxt_signer::eth::dev::baltathar()).address()), - value: U256::from(123123), - input: Bytes(vec![]), - r#type: Type0, - }; + fn encode_decode_tx_works() { + let txs = [ + // Legacy + ( + "f86080808301e24194095e7baea6a6c7c4c2dfeb977efac326af552d87808025a0fe38ca4e44a30002ac54af7cf922a6ac2ba11b7d22f548e8ecb3f51f41cb31b0a06de6a5cbae13c0c856e33acf021b51819636cfc009d39eafb9f606d546e305a8", + r#" + { + "chainId": "0x1", + "gas": "0x1e241", + "gasPrice": "0x0", + "input": "0x", + "nonce": "0x0", + "to": "0x095e7baea6a6c7c4c2dfeb977efac326af552d87", + "type": "0x0", + "value": "0x0", + "r": "0xfe38ca4e44a30002ac54af7cf922a6ac2ba11b7d22f548e8ecb3f51f41cb31b0", + "s": "0x6de6a5cbae13c0c856e33acf021b51819636cfc009d39eafb9f606d546e305a8", + "v": "0x25" + } + "# + ), + // type 1: EIP2930 + ( + "01f89b0180808301e24194095e7baea6a6c7c4c2dfeb977efac326af552d878080f838f7940000000000000000000000000000000000000001e1a0000000000000000000000000000000000000000000000000000000000000000080a0fe38ca4e44a30002ac54af7cf922a6ac2ba11b7d22f548e8ecb3f51f41cb31b0a06de6a5cbae13c0c856e33acf021b51819636cfc009d39eafb9f606d546e305a8", + r#" + { + "accessList": [ + { + "address": "0x0000000000000000000000000000000000000001", + "storageKeys": ["0x0000000000000000000000000000000000000000000000000000000000000000"] + } + ], + "chainId": "0x1", + "gas": "0x1e241", + "gasPrice": "0x0", + "input": "0x", + "nonce": "0x0", + "to": "0x095e7baea6a6c7c4c2dfeb977efac326af552d87", + "type": "0x1", + "value": "0x0", + "r": "0xfe38ca4e44a30002ac54af7cf922a6ac2ba11b7d22f548e8ecb3f51f41cb31b0", + "s": "0x6de6a5cbae13c0c856e33acf021b51819636cfc009d39eafb9f606d546e305a8", + "yParity": "0x0" + } + "# + ), + // type 2: EIP1559 + ( + "02f89c018080018301e24194095e7baea6a6c7c4c2dfeb977efac326af552d878080f838f7940000000000000000000000000000000000000001e1a0000000000000000000000000000000000000000000000000000000000000000080a0fe38ca4e44a30002ac54af7cf922a6ac2ba11b7d22f548e8ecb3f51f41cb31b0a06de6a5cbae13c0c856e33acf021b51819636cfc009d39eafb9f606d546e305a8", + r#" + { + "accessList": [ + { + "address": "0x0000000000000000000000000000000000000001", + "storageKeys": ["0x0000000000000000000000000000000000000000000000000000000000000000"] + } + ], + "chainId": "0x1", + "gas": "0x1e241", + "gasPrice": "0x0", + "input": "0x", + "maxFeePerGas": "0x1", + "maxPriorityFeePerGas": "0x0", + "nonce": "0x0", + "to": "0x095e7baea6a6c7c4c2dfeb977efac326af552d87", + "type": "0x2", + "value": "0x0", + "r": "0xfe38ca4e44a30002ac54af7cf922a6ac2ba11b7d22f548e8ecb3f51f41cb31b0", + "s": "0x6de6a5cbae13c0c856e33acf021b51819636cfc009d39eafb9f606d546e305a8", + "yParity": "0x0" - let rlp_bytes = rlp::encode(&tx); - let decoded = rlp::decode::(&rlp_bytes).unwrap(); - assert_eq!(&tx, &decoded); + } + "# + ), + // type 3: EIP4844 + ( - let tx = Account::default().sign_transaction(tx); - let rlp_bytes = rlp::encode(&tx); - let 
decoded = rlp::decode::(&rlp_bytes).unwrap(); - assert_eq!(&tx, &decoded); + "03f8bf018002018301e24194095e7baea6a6c7c4c2dfeb977efac326af552d878080f838f7940000000000000000000000000000000000000001e1a0000000000000000000000000000000000000000000000000000000000000000080e1a0000000000000000000000000000000000000000000000000000000000000000080a0fe38ca4e44a30002ac54af7cf922a6ac2ba11b7d22f548e8ecb3f51f41cb31b0a06de6a5cbae13c0c856e33acf021b51819636cfc009d39eafb9f606d546e305a8", + r#" + { + "accessList": [ + { + "address": "0x0000000000000000000000000000000000000001", + "storageKeys": ["0x0000000000000000000000000000000000000000000000000000000000000000"] + } + ], + "blobVersionedHashes": ["0x0000000000000000000000000000000000000000000000000000000000000000"], + "chainId": "0x1", + "gas": "0x1e241", + "input": "0x", + "maxFeePerBlobGas": "0x0", + "maxFeePerGas": "0x1", + "maxPriorityFeePerGas": "0x2", + "nonce": "0x0", + "to": "0x095e7baea6a6c7c4c2dfeb977efac326af552d87", + "type": "0x3", + "value": "0x0", + "r": "0xfe38ca4e44a30002ac54af7cf922a6ac2ba11b7d22f548e8ecb3f51f41cb31b0", + "s": "0x6de6a5cbae13c0c856e33acf021b51819636cfc009d39eafb9f606d546e305a8", + "yParity": "0x0" + } + "# + ) + ]; + + for (tx, json) in txs { + let raw_tx = hex::decode(tx).unwrap(); + let tx = TransactionSigned::decode(&raw_tx).unwrap(); + assert_eq!(tx.signed_payload(), raw_tx); + let expected_tx = serde_json::from_str(json).unwrap(); + assert_eq!(tx, expected_tx); + } } #[test] fn dummy_signed_payload_works() { - let tx = TransactionLegacyUnsigned { + let tx: TransactionUnsigned = TransactionLegacyUnsigned { chain_id: Some(596.into()), gas: U256::from(21000), nonce: U256::from(1), @@ -189,31 +575,12 @@ mod test { to: Some(Account::from(subxt_signer::eth::dev::baltathar()).address()), value: U256::from(123123), input: Bytes(vec![]), - r#type: Type0, - }; - - let signed_tx = Account::default().sign_transaction(tx.clone()); - let rlp_bytes = rlp::encode(&signed_tx); - assert_eq!(tx.dummy_signed_payload().len(), rlp_bytes.len()); - } - - #[test] - fn recover_address_works() { - let account = Account::default(); - - let unsigned_tx = TransactionLegacyUnsigned { - value: 200_000_000_000_000_000_000u128.into(), - gas_price: 100_000_000_200u64.into(), - gas: 100_107u32.into(), - nonce: 3.into(), - to: Some(Account::from(subxt_signer::eth::dev::baltathar()).address()), - chain_id: Some(596.into()), - ..Default::default() - }; - - let tx = account.sign_transaction(unsigned_tx.clone()); - let recovered_address = tx.recover_eth_address().unwrap(); + r#type: TypeLegacy, + } + .into(); - assert_eq!(account.address(), recovered_address); + let dummy_signed_payload = tx.dummy_signed_payload(); + let payload = Account::default().sign_transaction(tx).signed_payload(); + assert_eq!(dummy_signed_payload.len(), payload.len()); } } diff --git a/substrate/frame/revive/src/evm/api/rpc_types.rs b/substrate/frame/revive/src/evm/api/rpc_types.rs index dd1a2642724d..ed046cb4da44 100644 --- a/substrate/frame/revive/src/evm/api/rpc_types.rs +++ b/substrate/frame/revive/src/evm/api/rpc_types.rs @@ -16,7 +16,29 @@ // limitations under the License. //! Utility impl for the RPC types. 
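For orientation on the `signed_payload`/`decode` pair introduced in rlp_codec.rs above: they implement the EIP-2718 envelope convention. A minimal sketch follows (assuming only the `rlp` crate; the two-field list is a placeholder, not a real transaction body).

```rust
// EIP-2718 framing as used by `TransactionSigned::signed_payload` and
// `TransactionSigned::decode`: a typed transaction is one type byte followed
// by an RLP list, while a legacy transaction is the bare RLP list.
fn main() {
    // Placeholder two-field list standing in for a transaction body.
    let mut s = rlp::RlpStream::new();
    s.begin_list(2);
    s.append(&1u8); // e.g. chain_id
    s.append(&0u8); // e.g. nonce
    let body = s.out().to_vec();

    // Typed envelope, e.g. EIP-1559: 0x02 || rlp([...]).
    let mut eip1559 = vec![0x02u8];
    eip1559.extend_from_slice(&body);

    // `decode` dispatches on the first byte: an RLP list prefix is always
    // >= 0xc0, so the low type bytes (0x01, 0x02, 0x03) are unambiguous.
    assert_eq!(eip1559[0], 0x02);
    assert!(body[0] >= 0xc0);
}
```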
use super::*; -use sp_core::U256; +use alloc::vec::Vec; +use sp_core::{H160, U256}; + +impl From for BlockNumberOrTagOrHash { + fn from(b: BlockNumberOrTag) -> Self { + match b { + BlockNumberOrTag::U256(n) => BlockNumberOrTagOrHash::U256(n), + BlockNumberOrTag::BlockTag(t) => BlockNumberOrTagOrHash::BlockTag(t), + } + } +} + +impl From for TransactionUnsigned { + fn from(tx: TransactionSigned) -> Self { + use TransactionSigned::*; + match tx { + Transaction4844Signed(tx) => tx.transaction_4844_unsigned.into(), + Transaction1559Signed(tx) => tx.transaction_1559_unsigned.into(), + Transaction2930Signed(tx) => tx.transaction_2930_unsigned.into(), + TransactionLegacySigned(tx) => tx.transaction_legacy_unsigned.into(), + } + } +} impl TransactionInfo { /// Create a new [`TransactionInfo`] from a receipt and a signed transaction. @@ -138,3 +160,133 @@ fn logs_bloom_works() { .unwrap(); assert_eq!(receipt.logs_bloom, ReceiptInfo::logs_bloom(&receipt.logs)); } + +impl GenericTransaction { + /// Create a new [`GenericTransaction`] from a signed transaction. + pub fn from_signed(tx: TransactionSigned, from: Option) -> Self { + Self::from_unsigned(tx.into(), from) + } + + /// Create a new [`GenericTransaction`] from a unsigned transaction. + pub fn from_unsigned(tx: TransactionUnsigned, from: Option) -> Self { + use TransactionUnsigned::*; + match tx { + TransactionLegacyUnsigned(tx) => GenericTransaction { + from, + r#type: Some(tx.r#type.as_byte()), + chain_id: tx.chain_id, + input: Some(tx.input), + nonce: Some(tx.nonce), + value: Some(tx.value), + to: tx.to, + gas: Some(tx.gas), + gas_price: Some(tx.gas_price), + ..Default::default() + }, + Transaction4844Unsigned(tx) => GenericTransaction { + from, + r#type: Some(tx.r#type.as_byte()), + chain_id: Some(tx.chain_id), + input: Some(tx.input), + nonce: Some(tx.nonce), + value: Some(tx.value), + to: Some(tx.to), + gas: Some(tx.gas), + gas_price: Some(tx.max_fee_per_blob_gas), + access_list: Some(tx.access_list), + blob_versioned_hashes: tx.blob_versioned_hashes, + max_fee_per_blob_gas: Some(tx.max_fee_per_blob_gas), + max_fee_per_gas: Some(tx.max_fee_per_gas), + max_priority_fee_per_gas: Some(tx.max_priority_fee_per_gas), + ..Default::default() + }, + Transaction1559Unsigned(tx) => GenericTransaction { + from, + r#type: Some(tx.r#type.as_byte()), + chain_id: Some(tx.chain_id), + input: Some(tx.input), + nonce: Some(tx.nonce), + value: Some(tx.value), + to: tx.to, + gas: Some(tx.gas), + gas_price: Some(tx.gas_price), + access_list: Some(tx.access_list), + max_fee_per_gas: Some(tx.max_fee_per_gas), + max_priority_fee_per_gas: Some(tx.max_priority_fee_per_gas), + ..Default::default() + }, + Transaction2930Unsigned(tx) => GenericTransaction { + from, + r#type: Some(tx.r#type.as_byte()), + chain_id: Some(tx.chain_id), + input: Some(tx.input), + nonce: Some(tx.nonce), + value: Some(tx.value), + to: tx.to, + gas: Some(tx.gas), + gas_price: Some(tx.gas_price), + access_list: Some(tx.access_list), + ..Default::default() + }, + } + } + + /// Convert to a [`TransactionUnsigned`]. 
+ pub fn try_into_unsigned(self) -> Result { + match self.r#type.unwrap_or_default().0 { + TYPE_LEGACY => Ok(TransactionLegacyUnsigned { + r#type: TypeLegacy {}, + chain_id: self.chain_id, + input: self.input.unwrap_or_default(), + nonce: self.nonce.unwrap_or_default(), + value: self.value.unwrap_or_default(), + to: self.to, + gas: self.gas.unwrap_or_default(), + gas_price: self.gas_price.unwrap_or_default(), + } + .into()), + TYPE_EIP1559 => Ok(Transaction1559Unsigned { + r#type: TypeEip1559 {}, + chain_id: self.chain_id.unwrap_or_default(), + input: self.input.unwrap_or_default(), + nonce: self.nonce.unwrap_or_default(), + value: self.value.unwrap_or_default(), + to: self.to, + gas: self.gas.unwrap_or_default(), + gas_price: self.gas_price.unwrap_or_default(), + access_list: self.access_list.unwrap_or_default(), + max_fee_per_gas: self.max_fee_per_gas.unwrap_or_default(), + max_priority_fee_per_gas: self.max_priority_fee_per_gas.unwrap_or_default(), + } + .into()), + TYPE_EIP2930 => Ok(Transaction2930Unsigned { + r#type: TypeEip2930 {}, + chain_id: self.chain_id.unwrap_or_default(), + input: self.input.unwrap_or_default(), + nonce: self.nonce.unwrap_or_default(), + value: self.value.unwrap_or_default(), + to: self.to, + gas: self.gas.unwrap_or_default(), + gas_price: self.gas_price.unwrap_or_default(), + access_list: self.access_list.unwrap_or_default(), + } + .into()), + TYPE_EIP4844 => Ok(Transaction4844Unsigned { + r#type: TypeEip4844 {}, + chain_id: self.chain_id.unwrap_or_default(), + input: self.input.unwrap_or_default(), + nonce: self.nonce.unwrap_or_default(), + value: self.value.unwrap_or_default(), + to: self.to.unwrap_or_default(), + gas: self.gas.unwrap_or_default(), + max_fee_per_gas: self.max_fee_per_gas.unwrap_or_default(), + max_fee_per_blob_gas: self.max_fee_per_blob_gas.unwrap_or_default(), + max_priority_fee_per_gas: self.max_priority_fee_per_gas.unwrap_or_default(), + access_list: self.access_list.unwrap_or_default(), + blob_versioned_hashes: self.blob_versioned_hashes, + } + .into()), + _ => Err(()), + } + } +} diff --git a/substrate/frame/revive/src/evm/api/rpc_types_gen.rs b/substrate/frame/revive/src/evm/api/rpc_types_gen.rs index 48045a6acc86..1d65fdefdde6 100644 --- a/substrate/frame/revive/src/evm/api/rpc_types_gen.rs +++ b/substrate/frame/revive/src/evm/api/rpc_types_gen.rs @@ -17,7 +17,7 @@ //! Generated JSON-RPC types. 
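(Editor's aside) The `try_into_unsigned` added above fans out on the `type` byte, treating a missing field as legacy and rejecting unknown values instead of guessing. A condensed, std-only sketch of that rule; the local constants mirror the `TYPE_*` values generated in type_id.rs further down, and `kind` is a toy helper:

const TYPE_LEGACY: u8 = 0;
const TYPE_EIP2930: u8 = 1;
const TYPE_EIP1559: u8 = 2;
const TYPE_EIP4844: u8 = 3;

// Map an optional type byte to a transaction kind; unknown bytes are an error.
fn kind(r#type: Option<u8>) -> Result<&'static str, ()> {
    match r#type.unwrap_or(TYPE_LEGACY) {
        TYPE_LEGACY => Ok("legacy"),
        TYPE_EIP2930 => Ok("eip-2930"),
        TYPE_EIP1559 => Ok("eip-1559"),
        TYPE_EIP4844 => Ok("eip-4844"),
        _ => Err(()),
    }
}

fn main() {
    assert_eq!(kind(None), Ok("legacy")); // absent `type` defaults to legacy
    assert_eq!(kind(Some(5)), Err(()));
}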
#![allow(missing_docs)] -use super::{byte::*, Type0, Type1, Type2, Type3}; +use super::{byte::*, TypeEip1559, TypeEip2930, TypeEip4844, TypeLegacy}; use alloc::vec::Vec; use codec::{Decode, Encode}; use derive_more::{From, TryInto}; @@ -94,8 +94,8 @@ pub struct Block { /// Uncles pub uncles: Vec, /// Withdrawals - #[serde(skip_serializing_if = "Option::is_none")] - pub withdrawals: Option>, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub withdrawals: Vec, /// Withdrawals root #[serde(rename = "withdrawalsRoot", skip_serializing_if = "Option::is_none")] pub withdrawals_root: Option, @@ -114,7 +114,7 @@ pub enum BlockNumberOrTag { } impl Default for BlockNumberOrTag { fn default() -> Self { - BlockNumberOrTag::U256(Default::default()) + BlockNumberOrTag::BlockTag(Default::default()) } } @@ -133,7 +133,7 @@ pub enum BlockNumberOrTagOrHash { } impl Default for BlockNumberOrTagOrHash { fn default() -> Self { - BlockNumberOrTagOrHash::U256(Default::default()) + BlockNumberOrTagOrHash::BlockTag(Default::default()) } } @@ -148,12 +148,12 @@ pub struct GenericTransaction { pub access_list: Option, /// blobVersionedHashes /// List of versioned blob hashes associated with the transaction's EIP-4844 data blobs. - #[serde(rename = "blobVersionedHashes", skip_serializing_if = "Option::is_none")] - pub blob_versioned_hashes: Option>, + #[serde(rename = "blobVersionedHashes", default, skip_serializing_if = "Vec::is_empty")] + pub blob_versioned_hashes: Vec, /// blobs /// Raw blob data. - #[serde(skip_serializing_if = "Option::is_none")] - pub blobs: Option>, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub blobs: Vec, /// chainId /// Chain ID that this transaction is valid on. #[serde(rename = "chainId", skip_serializing_if = "Option::is_none")] @@ -319,7 +319,7 @@ pub enum TransactionUnsigned { } impl Default for TransactionUnsigned { fn default() -> Self { - TransactionUnsigned::Transaction4844Unsigned(Default::default()) + TransactionUnsigned::TransactionLegacyUnsigned(Default::default()) } } @@ -341,13 +341,13 @@ pub type AccessList = Vec; )] pub enum BlockTag { #[serde(rename = "earliest")] - #[default] Earliest, #[serde(rename = "finalized")] Finalized, #[serde(rename = "safe")] Safe, #[serde(rename = "latest")] + #[default] Latest, #[serde(rename = "pending")] Pending, @@ -392,7 +392,7 @@ pub struct Log { #[serde(skip_serializing_if = "Option::is_none")] pub removed: Option, /// topics - #[serde(skip_serializing_if = "Vec::is_empty")] + #[serde(default, skip_serializing_if = "Vec::is_empty")] pub topics: Vec, /// transaction hash #[serde(rename = "transactionHash")] @@ -455,7 +455,7 @@ pub struct Transaction1559Unsigned { /// to address pub to: Option
<Address>,
 	/// type
-	pub r#type: Type2,
+	pub r#type: TypeEip1559,
 	/// value
 	pub value: U256,
 }
@@ -486,7 +486,7 @@ pub struct Transaction2930Unsigned {
 	/// to address
 	pub to: Option<Address>,
 	/// type
-	pub r#type: Type1,
+	pub r#type: TypeEip2930,
 	/// value
 	pub value: U256,
 }
@@ -530,7 +530,7 @@ pub struct Transaction4844Unsigned {
 	/// to address
 	pub to: Address,
 	/// type
-	pub r#type: Type3,
+	pub r#type: TypeEip4844,
 	/// value
 	pub value: U256,
 }
@@ -557,7 +557,7 @@ pub struct TransactionLegacyUnsigned {
 	/// to address
 	pub to: Option<Address>
, /// type - pub r#type: Type0, + pub r#type: TypeLegacy, /// value pub value: U256, } @@ -574,7 +574,7 @@ pub enum TransactionSigned { } impl Default for TransactionSigned { fn default() -> Self { - TransactionSigned::Transaction4844Signed(Default::default()) + TransactionSigned::TransactionLegacySigned(Default::default()) } } @@ -622,8 +622,8 @@ pub struct Transaction1559Signed { pub v: Option, /// yParity /// The parity (0 for even, 1 for odd) of the y-value of the secp256k1 signature. - #[serde(rename = "yParity", skip_serializing_if = "Option::is_none")] - pub y_parity: Option, + #[serde(rename = "yParity")] + pub y_parity: U256, } /// Signed 2930 Transaction @@ -661,8 +661,8 @@ pub struct Transaction4844Signed { pub s: U256, /// yParity /// The parity (0 for even, 1 for odd) of the y-value of the secp256k1 signature. - #[serde(rename = "yParity", skip_serializing_if = "Option::is_none")] - pub y_parity: Option, + #[serde(rename = "yParity")] + pub y_parity: U256, } /// Signed Legacy Transaction diff --git a/substrate/frame/revive/src/evm/api/signature.rs b/substrate/frame/revive/src/evm/api/signature.rs index 957d50c8e324..9f39b92b461e 100644 --- a/substrate/frame/revive/src/evm/api/signature.rs +++ b/substrate/frame/revive/src/evm/api/signature.rs @@ -15,49 +15,11 @@ // See the License for the specific language governing permissions and // limitations under the License. //! Ethereum signature utilities -use super::{TransactionLegacySigned, TransactionLegacyUnsigned}; -use rlp::Encodable; +use super::*; use sp_core::{H160, U256}; use sp_io::{crypto::secp256k1_ecdsa_recover, hashing::keccak_256}; -impl TransactionLegacyUnsigned { - /// Recover the Ethereum address, from an RLP encoded transaction and a 65 bytes signature. - pub fn recover_eth_address(rlp_encoded: &[u8], signature: &[u8; 65]) -> Result { - let hash = keccak_256(rlp_encoded); - let mut addr = H160::default(); - let pk = secp256k1_ecdsa_recover(&signature, &hash).map_err(|_| ())?; - addr.assign_from_slice(&keccak_256(&pk[..])[12..]); - - Ok(addr) - } -} - impl TransactionLegacySigned { - /// Create a signed transaction from an [`TransactionLegacyUnsigned`] and a signature. - pub fn from( - transaction_legacy_unsigned: TransactionLegacyUnsigned, - signature: &[u8; 65], - ) -> TransactionLegacySigned { - let r = U256::from_big_endian(&signature[..32]); - let s = U256::from_big_endian(&signature[32..64]); - let recovery_id = signature[64] as u32; - let v = transaction_legacy_unsigned - .chain_id - .map(|chain_id| chain_id * 2 + 35 + recovery_id) - .unwrap_or_else(|| U256::from(27) + recovery_id); - - TransactionLegacySigned { transaction_legacy_unsigned, r, s, v } - } - - /// Get the raw 65 bytes signature from the signed transaction. - pub fn raw_signature(&self) -> Result<[u8; 65], ()> { - let mut s = [0u8; 65]; - self.r.write_as_big_endian(s[0..32].as_mut()); - self.s.write_as_big_endian(s[32..64].as_mut()); - s[64] = self.extract_recovery_id().ok_or(())?; - Ok(s) - } - /// Get the recovery ID from the signed transaction. /// See https://eips.ethereum.org/EIPS/eip-155 fn extract_recovery_id(&self) -> Option { @@ -71,10 +33,154 @@ impl TransactionLegacySigned { self.v.try_into().ok() } } +} + +impl TransactionUnsigned { + /// Extract the unsigned transaction from a signed transaction. 
+	pub fn from_signed(tx: TransactionSigned) -> Self {
+		match tx {
+			TransactionSigned::TransactionLegacySigned(signed) =>
+				Self::TransactionLegacyUnsigned(signed.transaction_legacy_unsigned),
+			TransactionSigned::Transaction4844Signed(signed) =>
+				Self::Transaction4844Unsigned(signed.transaction_4844_unsigned),
+			TransactionSigned::Transaction1559Signed(signed) =>
+				Self::Transaction1559Unsigned(signed.transaction_1559_unsigned),
+			TransactionSigned::Transaction2930Signed(signed) =>
+				Self::Transaction2930Unsigned(signed.transaction_2930_unsigned),
+		}
+	}
+
+	/// Create a signed transaction from a [`TransactionUnsigned`] and a signature.
+	pub fn with_signature(self, signature: [u8; 65]) -> TransactionSigned {
+		let r = U256::from_big_endian(&signature[..32]);
+		let s = U256::from_big_endian(&signature[32..64]);
+		let recovery_id = signature[64];
+
+		match self {
+			TransactionUnsigned::Transaction2930Unsigned(transaction_2930_unsigned) =>
+				Transaction2930Signed {
+					transaction_2930_unsigned,
+					r,
+					s,
+					v: None,
+					y_parity: U256::from(recovery_id),
+				}
+				.into(),
+			TransactionUnsigned::Transaction1559Unsigned(transaction_1559_unsigned) =>
+				Transaction1559Signed {
+					transaction_1559_unsigned,
+					r,
+					s,
+					v: None,
+					y_parity: U256::from(recovery_id),
+				}
+				.into(),
+
+			TransactionUnsigned::Transaction4844Unsigned(transaction_4844_unsigned) =>
+				Transaction4844Signed {
+					transaction_4844_unsigned,
+					r,
+					s,
+					y_parity: U256::from(recovery_id),
+				}
+				.into(),
+
+			TransactionUnsigned::TransactionLegacyUnsigned(transaction_legacy_unsigned) => {
+				let v = transaction_legacy_unsigned
+					.chain_id
+					.map(|chain_id| {
+						chain_id
+							.saturating_mul(U256::from(2))
+							.saturating_add(U256::from(35u32 + recovery_id as u32))
+					})
+					.unwrap_or_else(|| U256::from(27u32 + recovery_id as u32));
+
+				TransactionLegacySigned { transaction_legacy_unsigned, r, s, v }.into()
+			},
+		}
+	}
+}
+
+impl TransactionSigned {
+	/// Get the raw 65-byte signature from the signed transaction.
+	pub fn raw_signature(&self) -> Result<[u8; 65], ()> {
+		use TransactionSigned::*;
+		let (r, s, v) = match self {
+			TransactionLegacySigned(tx) => (tx.r, tx.s, tx.extract_recovery_id().ok_or(())?),
+			Transaction4844Signed(tx) => (tx.r, tx.s, tx.y_parity.try_into().map_err(|_| ())?),
+			Transaction1559Signed(tx) => (tx.r, tx.s, tx.y_parity.try_into().map_err(|_| ())?),
+			Transaction2930Signed(tx) => (tx.r, tx.s, tx.y_parity.try_into().map_err(|_| ())?),
+		};
+		let mut sig = [0u8; 65];
+		r.write_as_big_endian(sig[0..32].as_mut());
+		s.write_as_big_endian(sig[32..64].as_mut());
+		sig[64] = v;
+		Ok(sig)
+	}

-	/// Recover the Ethereum address from the signed transaction.
+	/// Recover the Ethereum address, from a signed transaction.
 	pub fn recover_eth_address(&self) -> Result<H160, ()> {
-		let rlp_encoded = self.transaction_legacy_unsigned.rlp_bytes();
-		TransactionLegacyUnsigned::recover_eth_address(&rlp_encoded, &self.raw_signature()?)
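(Editor's aside) The legacy arm of `with_signature` above implements the EIP-155 encoding of the recovery id into `v`, and `extract_recovery_id` further up reverses it. A std-only sketch of that round trip, using u64 in place of U256:

// v = chain_id * 2 + 35 + recovery_id with a chain id, 27 + recovery_id without.
fn legacy_v(chain_id: Option<u64>, recovery_id: u8) -> u64 {
    match chain_id {
        Some(id) => id * 2 + 35 + u64::from(recovery_id),
        None => 27 + u64::from(recovery_id),
    }
}

// Inverse of the encoding above; yields None for a `v` that fits neither form.
fn legacy_recovery_id(chain_id: Option<u64>, v: u64) -> Option<u8> {
    let id = match chain_id {
        Some(id) => v.checked_sub(id * 2 + 35)?,
        None => v.checked_sub(27)?,
    };
    u8::try_from(id).ok().filter(|id| *id < 2)
}

fn main() {
    assert_eq!(legacy_v(Some(596), 1), 1228);
    assert_eq!(legacy_recovery_id(Some(596), 1228), Some(1));
}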
+ use TransactionSigned::*; + + let mut s = rlp::RlpStream::new(); + match self { + TransactionLegacySigned(tx) => { + let tx = &tx.transaction_legacy_unsigned; + s.append(tx); + }, + Transaction4844Signed(tx) => { + let tx = &tx.transaction_4844_unsigned; + s.append(&tx.r#type.value()); + s.append(tx); + }, + Transaction1559Signed(tx) => { + let tx = &tx.transaction_1559_unsigned; + s.append(&tx.r#type.value()); + s.append(tx); + }, + Transaction2930Signed(tx) => { + let tx = &tx.transaction_2930_unsigned; + s.append(&tx.r#type.value()); + s.append(tx); + }, + } + let bytes = s.out().to_vec(); + let signature = self.raw_signature()?; + + let hash = keccak_256(&bytes); + let mut addr = H160::default(); + let pk = secp256k1_ecdsa_recover(&signature, &hash).map_err(|_| ())?; + addr.assign_from_slice(&keccak_256(&pk[..])[12..]); + Ok(addr) + } +} + +#[test] +fn sign_and_recover_work() { + use crate::evm::TransactionUnsigned; + let txs = [ + // Legacy + "f86080808301e24194095e7baea6a6c7c4c2dfeb977efac326af552d87808026a07b2e762a17a71a46b422e60890a04512cf0d907ccf6b78b5bd6e6977efdc2bf5a01ea673d50bbe7c2236acb498ceb8346a8607c941f0b8cbcde7cf439aa9369f1f", + //// type 1: EIP2930 + "01f89b0180808301e24194095e7baea6a6c7c4c2dfeb977efac326af552d878080f838f7940000000000000000000000000000000000000001e1a0000000000000000000000000000000000000000000000000000000000000000080a0c45a61b3d1d00169c649e7326e02857b850efb96e587db4b9aad29afc80d0752a070ae1eb47ab4097dbed2f19172ae286492621b46ac737ee6c32fb18a00c94c9c", + // type 2: EIP1559 + "02f89c018080018301e24194095e7baea6a6c7c4c2dfeb977efac326af552d878080f838f7940000000000000000000000000000000000000001e1a0000000000000000000000000000000000000000000000000000000000000000080a055d72bbc3047d4b9d3e4b8099f187143202407746118204cc2e0cb0c85a68baea04f6ef08a1418c70450f53398d9f0f2d78d9e9d6b8a80cba886b67132c4a744f2", + // type 3: EIP4844 + "03f8bf018002018301e24194095e7baea6a6c7c4c2dfeb977efac326af552d878080f838f7940000000000000000000000000000000000000001e1a0000000000000000000000000000000000000000000000000000000000000000080e1a0000000000000000000000000000000000000000000000000000000000000000001a0672b8bac466e2cf1be3148c030988d40d582763ecebbc07700dfc93bb070d8a4a07c635887005b11cb58964c04669ac2857fa633aa66f662685dadfd8bcacb0f21", + ]; + let account = Account::from_secret_key(hex_literal::hex!( + "a872f6cbd25a0e04a08b1e21098017a9e6194d101d75e13111f71410c59cd57f" + )); + + for tx in txs { + let raw_tx = hex::decode(tx).unwrap(); + let tx = TransactionSigned::decode(&raw_tx).unwrap(); + + let address = tx.recover_eth_address(); + assert_eq!(address.unwrap(), account.address()); + + let unsigned = TransactionUnsigned::from_signed(tx.clone()); + let signed = account.sign_transaction(unsigned); + assert_eq!(tx, signed); } } diff --git a/substrate/frame/revive/src/evm/api/type_id.rs b/substrate/frame/revive/src/evm/api/type_id.rs index 7434ca6e9b7f..c6e018a379b3 100644 --- a/substrate/frame/revive/src/evm/api/type_id.rs +++ b/substrate/frame/revive/src/evm/api/type_id.rs @@ -17,6 +17,7 @@ //! Ethereum Typed Transaction types use super::Byte; use codec::{Decode, Encode}; +use paste::paste; use rlp::Decodable; use scale_info::TypeInfo; use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -29,8 +30,14 @@ macro_rules! transaction_type { #[derive(Clone, Default, Debug, Eq, PartialEq)] pub struct $name; + // upper case const name + paste! 
{ + #[doc = concat!("Transaction value for type identifier: ", $value)] + pub const [<$name:snake:upper>]: u8 = $value; + } + impl $name { - /// Get the value of the type + /// Convert to u8 pub fn value(&self) -> u8 { $value } @@ -107,7 +114,12 @@ macro_rules! transaction_type { }; } -transaction_type!(Type0, 0); -transaction_type!(Type1, 1); -transaction_type!(Type2, 2); -transaction_type!(Type3, 3); +transaction_type!(TypeLegacy, 0); +transaction_type!(TypeEip2930, 1); +transaction_type!(TypeEip1559, 2); +transaction_type!(TypeEip4844, 3); + +#[test] +fn transaction_type() { + assert_eq!(TYPE_EIP2930, 1u8); +} diff --git a/substrate/frame/revive/src/evm/runtime.rs b/substrate/frame/revive/src/evm/runtime.rs index 21294fdf6baa..24b75de83569 100644 --- a/substrate/frame/revive/src/evm/runtime.rs +++ b/substrate/frame/revive/src/evm/runtime.rs @@ -16,7 +16,7 @@ // limitations under the License. //! Runtime types for integrating `pallet-revive` with the EVM. use crate::{ - evm::api::{TransactionLegacySigned, TransactionLegacyUnsigned}, + evm::api::{GenericTransaction, TransactionSigned}, AccountIdOf, AddressMapper, BalanceOf, MomentOf, Weight, LOG_TARGET, }; use codec::{Decode, Encode}; @@ -92,8 +92,12 @@ impl ExtrinsicLike impl ExtrinsicMetadata for UncheckedExtrinsic { - const VERSION: u8 = - generic::UncheckedExtrinsic::, Signature, E::Extension>::VERSION; + const VERSIONS: &'static [u8] = generic::UncheckedExtrinsic::< + Address, + CallOf, + Signature, + E::Extension, + >::VERSIONS; type TransactionExtensions = E::Extension; } @@ -293,7 +297,7 @@ pub trait EthExtra { CallOf: From>, ::Hash: frame_support::traits::IsType, { - let tx = rlp::decode::(&payload).map_err(|err| { + let tx = TransactionSigned::decode(&payload).map_err(|err| { log::debug!(target: LOG_TARGET, "Failed to decode transaction: {err:?}"); InvalidTransaction::Call })?; @@ -305,33 +309,33 @@ pub trait EthExtra { let signer = ::AddressMapper::to_fallback_account_id(&signer); - let TransactionLegacyUnsigned { nonce, chain_id, to, value, input, gas, gas_price, .. } = - tx.transaction_legacy_unsigned; + let GenericTransaction { nonce, chain_id, to, value, input, gas, gas_price, .. 
} = + GenericTransaction::from_signed(tx, None); if chain_id.unwrap_or_default() != ::ChainId::get().into() { log::debug!(target: LOG_TARGET, "Invalid chain_id {chain_id:?}"); return Err(InvalidTransaction::Call); } - let value = crate::Pallet::::convert_evm_to_native(value).map_err(|err| { - log::debug!(target: LOG_TARGET, "Failed to convert value to native: {err:?}"); - InvalidTransaction::Call - })?; + let value = crate::Pallet::::convert_evm_to_native(value.unwrap_or_default()) + .map_err(|err| { + log::debug!(target: LOG_TARGET, "Failed to convert value to native: {err:?}"); + InvalidTransaction::Call + })?; + let data = input.unwrap_or_default().0; let call = if let Some(dest) = to { crate::Call::call:: { dest, value, gas_limit, storage_deposit_limit, - data: input.0, + data, } } else { - let blob = match polkavm::ProgramBlob::blob_length(&input.0) { - Some(blob_len) => blob_len - .try_into() - .ok() - .and_then(|blob_len| (input.0.split_at_checked(blob_len))), + let blob = match polkavm::ProgramBlob::blob_length(&data) { + Some(blob_len) => + blob_len.try_into().ok().and_then(|blob_len| (data.split_at_checked(blob_len))), _ => None, }; @@ -350,18 +354,18 @@ pub trait EthExtra { } }; - let nonce = nonce.try_into().map_err(|_| InvalidTransaction::Call)?; + let nonce = nonce.unwrap_or_default().try_into().map_err(|_| InvalidTransaction::Call)?; // Fees calculated with the fixed `GAS_PRICE` // When we dry-run the transaction, we set the gas to `Fee / GAS_PRICE` let eth_fee_no_tip = U256::from(GAS_PRICE) - .saturating_mul(gas) + .saturating_mul(gas.unwrap_or_default()) .try_into() .map_err(|_| InvalidTransaction::Call)?; // Fees with the actual gas_price from the transaction. - let eth_fee: BalanceOf = U256::from(gas_price) - .saturating_mul(gas) + let eth_fee: BalanceOf = U256::from(gas_price.unwrap_or_default()) + .saturating_mul(gas.unwrap_or_default()) .try_into() .map_err(|_| InvalidTransaction::Call)?; @@ -414,7 +418,6 @@ mod test { }; use frame_support::{error::LookupError, traits::fungible::Mutate}; use pallet_revive_fixtures::compile_module; - use rlp::Encodable; use sp_runtime::{ traits::{Checkable, DispatchTransaction}, MultiAddress, MultiSignature, @@ -452,236 +455,265 @@ mod test { /// A builder for creating an unchecked extrinsic, and test that the check function works. #[derive(Clone)] struct UncheckedExtrinsicBuilder { - tx: TransactionLegacyUnsigned, + tx: GenericTransaction, gas_limit: Weight, storage_deposit_limit: BalanceOf, + before_validate: Option>, } impl UncheckedExtrinsicBuilder { /// Create a new builder with default values. 
fn new() -> Self { Self { - tx: TransactionLegacyUnsigned { + tx: GenericTransaction { + from: Some(Account::default().address()), chain_id: Some(::ChainId::get().into()), - gas_price: U256::from(GAS_PRICE), + gas_price: Some(U256::from(GAS_PRICE)), ..Default::default() }, gas_limit: Weight::zero(), storage_deposit_limit: 0, + before_validate: None, } } fn estimate_gas(&mut self) { - let dry_run = crate::Pallet::::bare_eth_transact( - Account::default().substrate_account(), - self.tx.to, - self.tx.value.try_into().unwrap(), - self.tx.input.clone().0, - Weight::MAX, - u64::MAX, - |call| { + let dry_run = + crate::Pallet::::bare_eth_transact(self.tx.clone(), Weight::MAX, |call| { let call = RuntimeCall::Contracts(call); let uxt: Ex = sp_runtime::generic::UncheckedExtrinsic::new_bare(call).into(); uxt.encoded_size() as u32 + }); + + match dry_run { + Ok(dry_run) => { + log::debug!(target: LOG_TARGET, "Estimated gas: {:?}", dry_run.eth_gas); + self.tx.gas = Some(dry_run.eth_gas); + }, + Err(err) => { + log::debug!(target: LOG_TARGET, "Failed to estimate gas: {:?}", err); }, - crate::DebugInfo::Skip, - crate::CollectEvents::Skip, - ); - self.tx.gas = ((dry_run.fee + GAS_PRICE as u64) / (GAS_PRICE as u64)).into(); + } } /// Create a new builder with a call to the given address. fn call_with(dest: H160) -> Self { let mut builder = Self::new(); builder.tx.to = Some(dest); - builder.estimate_gas(); + ExtBuilder::default().build().execute_with(|| builder.estimate_gas()); builder } /// Create a new builder with an instantiate call. fn instantiate_with(code: Vec, data: Vec) -> Self { let mut builder = Self::new(); - builder.tx.input = Bytes(code.into_iter().chain(data.into_iter()).collect()); - builder.estimate_gas(); + builder.tx.input = Some(Bytes(code.into_iter().chain(data.into_iter()).collect())); + ExtBuilder::default().build().execute_with(|| builder.estimate_gas()); builder } /// Update the transaction with the given function. - fn update(mut self, f: impl FnOnce(&mut TransactionLegacyUnsigned) -> ()) -> Self { + fn update(mut self, f: impl FnOnce(&mut GenericTransaction) -> ()) -> Self { f(&mut self.tx); self } + /// Set before_validate function. + fn before_validate(mut self, f: impl Fn() + Send + Sync + 'static) -> Self { + self.before_validate = Some(std::sync::Arc::new(f)); + self + } /// Call `check` on the unchecked extrinsic, and `pre_dispatch` on the signed extension. fn check(&self) -> Result<(RuntimeCall, SignedExtra), TransactionValidityError> { - let UncheckedExtrinsicBuilder { tx, gas_limit, storage_deposit_limit } = self.clone(); - - // Fund the account. 
- let account = Account::default(); - let _ = ::Currency::set_balance( - &account.substrate_account(), - 100_000_000_000_000, - ); - - let payload = account.sign_transaction(tx).rlp_bytes().to_vec(); - let call = RuntimeCall::Contracts(crate::Call::eth_transact { - payload, - gas_limit, - storage_deposit_limit, - }); - - let encoded_len = call.encoded_size(); - let uxt: Ex = generic::UncheckedExtrinsic::new_bare(call).into(); - let result: CheckedExtrinsic<_, _, _> = uxt.check(&TestContext {})?; - let (account_id, extra): (AccountId32, SignedExtra) = match result.format { - ExtrinsicFormat::Signed(signer, extra) => (signer, extra), - _ => unreachable!(), - }; - - extra.clone().validate_and_prepare( - RuntimeOrigin::signed(account_id), - &result.function, - &result.function.get_dispatch_info(), - encoded_len, - 0, - )?; + ExtBuilder::default().build().execute_with(|| { + let UncheckedExtrinsicBuilder { + tx, + gas_limit, + storage_deposit_limit, + before_validate, + } = self.clone(); + + // Fund the account. + let account = Account::default(); + let _ = ::Currency::set_balance( + &account.substrate_account(), + 100_000_000_000_000, + ); + + let payload = + account.sign_transaction(tx.try_into_unsigned().unwrap()).signed_payload(); + let call = RuntimeCall::Contracts(crate::Call::eth_transact { + payload, + gas_limit, + storage_deposit_limit, + }); + + let encoded_len = call.encoded_size(); + let uxt: Ex = generic::UncheckedExtrinsic::new_bare(call).into(); + let result: CheckedExtrinsic<_, _, _> = uxt.check(&TestContext {})?; + let (account_id, extra): (AccountId32, SignedExtra) = match result.format { + ExtrinsicFormat::Signed(signer, extra) => (signer, extra), + _ => unreachable!(), + }; - Ok((result.function, extra)) + before_validate.map(|f| f()); + extra.clone().validate_and_prepare( + RuntimeOrigin::signed(account_id), + &result.function, + &result.function.get_dispatch_info(), + encoded_len, + 0, + )?; + + Ok((result.function, extra)) + }) } } #[test] fn check_eth_transact_call_works() { - ExtBuilder::default().build().execute_with(|| { - let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])); - assert_eq!( - builder.check().unwrap().0, - crate::Call::call:: { - dest: builder.tx.to.unwrap(), - value: builder.tx.value.as_u64(), - gas_limit: builder.gas_limit, - storage_deposit_limit: builder.storage_deposit_limit, - data: builder.tx.input.0 - } - .into() - ); - }); + let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])); + assert_eq!( + builder.check().unwrap().0, + crate::Call::call:: { + dest: builder.tx.to.unwrap(), + value: builder.tx.value.unwrap_or_default().as_u64(), + gas_limit: builder.gas_limit, + storage_deposit_limit: builder.storage_deposit_limit, + data: builder.tx.input.unwrap_or_default().0 + } + .into() + ); } #[test] fn check_eth_transact_instantiate_works() { - ExtBuilder::default().build().execute_with(|| { - let (code, _) = compile_module("dummy").unwrap(); - let data = vec![]; - let builder = UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone()); - - assert_eq!( - builder.check().unwrap().0, - crate::Call::instantiate_with_code:: { - value: builder.tx.value.as_u64(), - gas_limit: builder.gas_limit, - storage_deposit_limit: builder.storage_deposit_limit, - code, - data, - salt: None - } - .into() - ); - }); + let (code, _) = compile_module("dummy").unwrap(); + let data = vec![]; + let builder = UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone()); + + assert_eq!( + 
builder.check().unwrap().0, + crate::Call::instantiate_with_code:: { + value: builder.tx.value.unwrap_or_default().as_u64(), + gas_limit: builder.gas_limit, + storage_deposit_limit: builder.storage_deposit_limit, + code, + data, + salt: None + } + .into() + ); } #[test] fn check_eth_transact_nonce_works() { - ExtBuilder::default().build().execute_with(|| { - let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])) - .update(|tx| tx.nonce = 1u32.into()); - - assert_eq!( - builder.check(), - Err(TransactionValidityError::Invalid(InvalidTransaction::Future)) - ); - - >::inc_account_nonce(Account::default().substrate_account()); - - let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])); - assert_eq!( - builder.check(), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)) - ); - }); + let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])) + .update(|tx| tx.nonce = Some(1u32.into())); + + assert_eq!( + builder.check(), + Err(TransactionValidityError::Invalid(InvalidTransaction::Future)) + ); + + let builder = + UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])).before_validate(|| { + >::inc_account_nonce(Account::default().substrate_account()); + }); + + assert_eq!( + builder.check(), + Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)) + ); } #[test] fn check_eth_transact_chain_id_works() { - ExtBuilder::default().build().execute_with(|| { - let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])) - .update(|tx| tx.chain_id = Some(42.into())); - - assert_eq!( - builder.check(), - Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) - ); - }); + let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])) + .update(|tx| tx.chain_id = Some(42.into())); + + assert_eq!( + builder.check(), + Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) + ); } #[test] fn check_instantiate_data() { - ExtBuilder::default().build().execute_with(|| { - let code = b"invalid code".to_vec(); - let data = vec![1]; - let builder = UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone()); - - // Fail because the tx input fail to get the blob length - assert_eq!( - builder.clone().update(|tx| tx.input = Bytes(vec![1, 2, 3])).check(), - Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) - ); - }); + let code = b"invalid code".to_vec(); + let data = vec![1]; + let builder = UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone()); + + // Fail because the tx input fail to get the blob length + assert_eq!( + builder.clone().update(|tx| tx.input = Some(Bytes(vec![1, 2, 3]))).check(), + Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) + ); } #[test] fn check_transaction_fees() { - ExtBuilder::default().build().execute_with(|| { - let scenarios: [(_, Box, _); 5] = [ - ("Eth fees too low", Box::new(|tx| tx.gas_price /= 2), InvalidTransaction::Payment), - ("Gas fees too high", Box::new(|tx| tx.gas *= 2), InvalidTransaction::Call), - ("Gas fees too low", Box::new(|tx| tx.gas *= 2), InvalidTransaction::Call), - ( - "Diff > 10%", - Box::new(|tx| tx.gas = tx.gas * 111 / 100), - InvalidTransaction::Call, - ), - ( - "Diff < 10%", - Box::new(|tx| { - tx.gas_price *= 2; - tx.gas = tx.gas * 89 / 100 - }), - InvalidTransaction::Call, - ), - ]; - - for (msg, update_tx, err) in scenarios { - let builder = - UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])).update(update_tx); - - assert_eq!(builder.check(), 
Err(TransactionValidityError::Invalid(err)), "{}", msg); - } - }); + let scenarios: [(_, Box, _); 5] = [ + ( + "Eth fees too low", + Box::new(|tx| { + tx.gas_price = Some(tx.gas_price.unwrap() / 2); + }), + InvalidTransaction::Payment, + ), + ( + "Gas fees too high", + Box::new(|tx| { + tx.gas = Some(tx.gas.unwrap() * 2); + }), + InvalidTransaction::Call, + ), + ( + "Gas fees too low", + Box::new(|tx| { + tx.gas = Some(tx.gas.unwrap() * 2); + }), + InvalidTransaction::Call, + ), + ( + "Diff > 10%", + Box::new(|tx| { + tx.gas = Some(tx.gas.unwrap() * 111 / 100); + }), + InvalidTransaction::Call, + ), + ( + "Diff < 10%", + Box::new(|tx| { + tx.gas_price = Some(tx.gas_price.unwrap() * 2); + tx.gas = Some(tx.gas.unwrap() * 89 / 100); + }), + InvalidTransaction::Call, + ), + ]; + + for (msg, update_tx, err) in scenarios { + let builder = + UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])).update(update_tx); + + assert_eq!(builder.check(), Err(TransactionValidityError::Invalid(err)), "{}", msg); + } } #[test] fn check_transaction_tip() { - ExtBuilder::default().build().execute_with(|| { - let (code, _) = compile_module("dummy").unwrap(); - let data = vec![]; - let builder = UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone()) - .update(|tx| tx.gas_price = tx.gas_price * 103 / 100); - - let tx = &builder.tx; - let expected_tip = tx.gas_price * tx.gas - U256::from(GAS_PRICE) * tx.gas; - let (_, extra) = builder.check().unwrap(); - assert_eq!(U256::from(extra.1.tip()), expected_tip); - }); + let (code, _) = compile_module("dummy").unwrap(); + let data = vec![]; + let builder = UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone()) + .update(|tx| { + tx.gas_price = Some(tx.gas_price.unwrap() * 103 / 100); + log::debug!(target: LOG_TARGET, "Gas price: {:?}", tx.gas_price); + }); + + let tx = &builder.tx; + let expected_tip = + tx.gas_price.unwrap() * tx.gas.unwrap() - U256::from(GAS_PRICE) * tx.gas.unwrap(); + let (_, extra) = builder.check().unwrap(); + assert_eq!(U256::from(extra.1.tip()), expected_tip); } } diff --git a/substrate/frame/revive/src/exec.rs b/substrate/frame/revive/src/exec.rs index 49c08166483e..b6f0e3ae1a81 100644 --- a/substrate/frame/revive/src/exec.rs +++ b/substrate/frame/revive/src/exec.rs @@ -562,6 +562,9 @@ pub struct Stack<'a, T: Config, E> { debug_message: Option<&'a mut DebugBuffer>, /// Transient storage used to store data, which is kept for the duration of a transaction. transient_storage: TransientStorage, + /// Whether or not actual transfer of funds should be performed. + /// This is set to `true` exclusively when we simulate a call through eth_transact. + skip_transfer: bool, /// No executable is held by the struct but influences its behaviour. _phantom: PhantomData, } @@ -777,6 +780,7 @@ where storage_meter: &'a mut storage::meter::Meter, value: U256, input_data: Vec, + skip_transfer: bool, debug_message: Option<&'a mut DebugBuffer>, ) -> ExecResult { let dest = T::AddressMapper::to_account_id(&dest); @@ -786,6 +790,7 @@ where gas_meter, storage_meter, value, + skip_transfer, debug_message, )? { stack.run(executable, input_data).map(|_| stack.first_frame.last_frame_output) @@ -812,6 +817,7 @@ where value: U256, input_data: Vec, salt: Option<&[u8; 32]>, + skip_transfer: bool, debug_message: Option<&'a mut DebugBuffer>, ) -> Result<(H160, ExecReturnValue), ExecError> { let (mut stack, executable) = Self::new( @@ -825,6 +831,7 @@ where gas_meter, storage_meter, value, + skip_transfer, debug_message, )? 
.expect(FRAME_ALWAYS_EXISTS_ON_INSTANTIATE); @@ -853,6 +860,7 @@ where gas_meter, storage_meter, value.into(), + false, debug_message, ) .unwrap() @@ -869,6 +877,7 @@ where gas_meter: &'a mut GasMeter, storage_meter: &'a mut storage::meter::Meter, value: U256, + skip_transfer: bool, debug_message: Option<&'a mut DebugBuffer>, ) -> Result, ExecError> { origin.ensure_mapped()?; @@ -896,6 +905,7 @@ where frames: Default::default(), debug_message, transient_storage: TransientStorage::new(limits::TRANSIENT_STORAGE_BYTES), + skip_transfer, _phantom: Default::default(), }; @@ -1058,7 +1068,7 @@ where self.transient_storage.start_transaction(); - let do_transaction = || { + let do_transaction = || -> ExecResult { let caller = self.caller(); let frame = top_frame_mut!(self); @@ -1073,6 +1083,7 @@ where &frame.account_id, frame.contract_info.get(&frame.account_id), executable.code_info(), + self.skip_transfer, )?; // Needs to be incremented before calling into the code so that it is visible // in case of recursion. @@ -1096,11 +1107,8 @@ where let call_span = T::Debug::new_call_span(&contract_address, entry_point, &input_data); let output = T::Debug::intercept_call(&contract_address, entry_point, &input_data) - .unwrap_or_else(|| { - executable - .execute(self, entry_point, input_data) - .map_err(|e| ExecError { error: e.error, origin: ErrorOrigin::Callee }) - })?; + .unwrap_or_else(|| executable.execute(self, entry_point, input_data)) + .map_err(|e| ExecError { error: e.error, origin: ErrorOrigin::Callee })?; call_span.after_call(&output); @@ -1125,7 +1133,10 @@ where // If a special limit was set for the sub-call, we enforce it here. // The sub-call will be rolled back in case the limit is exhausted. let contract = frame.contract_info.as_contract(); - frame.nested_storage.enforce_subcall_limit(contract)?; + frame + .nested_storage + .enforce_subcall_limit(contract) + .map_err(|e| ExecError { error: e, origin: ErrorOrigin::Callee })?; let account_id = T::AddressMapper::to_address(&frame.account_id); match (entry_point, delegated_code_hash) { @@ -2101,6 +2112,7 @@ mod tests { &mut storage_meter, value.into(), vec![], + false, None, ), Ok(_) @@ -2193,6 +2205,7 @@ mod tests { &mut storage_meter, value.into(), vec![], + false, None, ) .unwrap(); @@ -2233,6 +2246,7 @@ mod tests { &mut storage_meter, value.into(), vec![], + false, None, )); @@ -2269,6 +2283,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ), ExecError { @@ -2286,6 +2301,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, )); }); @@ -2314,6 +2330,7 @@ mod tests { &mut storage_meter, 55u64.into(), vec![], + false, None, ) .unwrap(); @@ -2363,6 +2380,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ); @@ -2392,6 +2410,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ); @@ -2421,6 +2440,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![1, 2, 3, 4], + false, None, ); assert_matches!(result, Ok(_)); @@ -2457,6 +2477,7 @@ mod tests { min_balance.into(), vec![1, 2, 3, 4], Some(&[0; 32]), + false, None, ); assert_matches!(result, Ok(_)); @@ -2511,6 +2532,7 @@ mod tests { &mut storage_meter, value.into(), vec![], + false, None, ); @@ -2575,6 +2597,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ); @@ -2640,6 +2663,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ); @@ -2672,6 +2696,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ); assert_matches!(result, Ok(_)); @@ 
-2709,6 +2734,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![0], + false, None, ); assert_matches!(result, Ok(_)); @@ -2735,6 +2761,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![0], + false, None, ); assert_matches!(result, Ok(_)); @@ -2779,6 +2806,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![0], + false, None, ); assert_matches!(result, Ok(_)); @@ -2805,6 +2833,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![0], + false, None, ); assert_matches!(result, Ok(_)); @@ -2831,6 +2860,7 @@ mod tests { &mut storage_meter, 1u64.into(), vec![0], + false, None, ); assert_matches!(result, Err(_)); @@ -2875,6 +2905,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![0], + false, None, ); assert_matches!(result, Ok(_)); @@ -2920,6 +2951,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ); @@ -2946,6 +2978,7 @@ mod tests { U256::zero(), // <- zero value vec![], Some(&[0; 32]), + false, None, ), Err(_) @@ -2981,6 +3014,7 @@ mod tests { min_balance.into(), vec![], Some(&[0 ;32]), + false, None, ), Ok((address, ref output)) if output.data == vec![80, 65, 83, 83] => address @@ -3032,10 +3066,10 @@ mod tests { executable, &mut gas_meter, &mut storage_meter, - min_balance.into(), vec![], Some(&[0; 32]), + false, None, ), Ok((address, ref output)) if output.data == vec![70, 65, 73, 76] => address @@ -3100,6 +3134,7 @@ mod tests { &mut storage_meter, (min_balance * 10).into(), vec![], + false, None, ), Ok(_) @@ -3180,6 +3215,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ), Ok(_) @@ -3223,6 +3259,7 @@ mod tests { 100u64.into(), vec![], Some(&[0; 32]), + false, None, ), Err(Error::::TerminatedInConstructor.into()) @@ -3287,6 +3324,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![0], + false, None, ); assert_matches!(result, Ok(_)); @@ -3349,6 +3387,7 @@ mod tests { 10u64.into(), vec![], Some(&[0; 32]), + false, None, ); assert_matches!(result, Ok(_)); @@ -3373,7 +3412,7 @@ mod tests { true, false ), - >::TransferFailed + >::TransferFailed, ); exec_success() }); @@ -3395,6 +3434,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ) .unwrap(); @@ -3426,6 +3466,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, Some(&mut debug_buffer), ) .unwrap(); @@ -3459,6 +3500,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, Some(&mut debug_buffer), ); assert!(result.is_err()); @@ -3492,6 +3534,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, Some(&mut debug_buf_after), ) .unwrap(); @@ -3525,6 +3568,7 @@ mod tests { &mut storage_meter, U256::zero(), CHARLIE_ADDR.as_bytes().to_vec(), + false, None, )); @@ -3537,6 +3581,7 @@ mod tests { &mut storage_meter, U256::zero(), BOB_ADDR.as_bytes().to_vec(), + false, None, ) .map_err(|e| e.error), @@ -3587,6 +3632,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![0], + false, None, ) .map_err(|e| e.error), @@ -3621,6 +3667,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ) .unwrap(); @@ -3705,6 +3752,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ) .unwrap(); @@ -3831,6 +3879,7 @@ mod tests { (min_balance * 100).into(), vec![], Some(&[0; 32]), + false, None, ) .ok(); @@ -3844,6 +3893,7 @@ mod tests { (min_balance * 100).into(), vec![], Some(&[0; 32]), + false, None, )); assert_eq!(System::account_nonce(&ALICE), 1); @@ -3856,6 +3906,7 @@ mod tests { (min_balance * 200).into(), vec![], Some(&[0; 32]), + false, None, )); 
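(Editor's aside) The `false` literal threaded through every call site in these tests is the new `skip_transfer` flag documented on `Stack` above: only the eth_transact dry-run path sets it to `true`, in which case deposits are credited rather than transferred (see the storage meter changes at the end of this diff). A toy mirror of that branch; `charge_ed` is a hypothetical helper, not the pallet's API:

// Charge the existential deposit `ed`, either for real or in simulation.
fn charge_ed(skip_transfer: bool, origin: &mut u128, contract: &mut u128, ed: u128) {
    if skip_transfer {
        *contract = ed; // simulation: conjure the balance instead of moving it
    } else {
        *origin -= ed; // real execution: funds actually change hands
        *contract += ed;
    }
}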
assert_eq!(System::account_nonce(&ALICE), 2); @@ -3868,6 +3919,7 @@ mod tests { (min_balance * 200).into(), vec![], Some(&[0; 32]), + false, None, )); assert_eq!(System::account_nonce(&ALICE), 3); @@ -3936,6 +3988,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, )); }); @@ -4047,6 +4100,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, )); }); @@ -4086,6 +4140,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, )); }); @@ -4125,6 +4180,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, )); }); @@ -4178,6 +4234,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, )); }); @@ -4234,6 +4291,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, )); }); @@ -4309,6 +4367,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, )); }); @@ -4379,6 +4438,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![0], + false, None, ); assert_matches!(result, Ok(_)); @@ -4417,6 +4477,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, )); }); @@ -4479,6 +4540,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![0], + false, None, ); assert_matches!(result, Ok(_)); @@ -4512,6 +4574,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ); assert_matches!(result, Ok(_)); @@ -4595,6 +4658,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ) .unwrap() @@ -4663,6 +4727,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![0], + false, None, ); assert_matches!(result, Ok(_)); @@ -4734,6 +4799,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ); assert_matches!(result, Ok(_)); @@ -4785,6 +4851,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ) .unwrap() @@ -4854,6 +4921,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ) .unwrap() @@ -4900,6 +4968,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ) .unwrap() @@ -4944,6 +5013,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ) .unwrap() @@ -4999,6 +5069,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![0], + false, None, ), Ok(_) diff --git a/substrate/frame/revive/src/lib.rs b/substrate/frame/revive/src/lib.rs index caecf07c4071..b9a39e7ce4d3 100644 --- a/substrate/frame/revive/src/lib.rs +++ b/substrate/frame/revive/src/lib.rs @@ -41,13 +41,13 @@ pub mod test_utils; pub mod weights; use crate::{ - evm::{runtime::GAS_PRICE, TransactionLegacyUnsigned}, + evm::{runtime::GAS_PRICE, GenericTransaction}, exec::{AccountIdOf, ExecError, Executable, Ext, Key, Origin, Stack as ExecStack}, gas::GasMeter, storage::{meter::Meter as StorageMeter, ContractInfo, DeletionQueueManager}, wasm::{CodeInfo, RuntimeCosts, WasmBlob}, }; -use alloc::boxed::Box; +use alloc::{boxed::Box, format, vec}; use codec::{Codec, Decode, Encode}; use environmental::*; use frame_support::{ @@ -74,7 +74,7 @@ use pallet_transaction_payment::OnChargeTransaction; use scale_info::TypeInfo; use sp_core::{H160, H256, U256}; use sp_runtime::{ - traits::{BadOrigin, Convert, Dispatchable, Saturating, Zero}, + traits::{BadOrigin, Bounded, Convert, Dispatchable, Saturating, Zero}, DispatchError, }; @@ -115,19 +115,6 @@ const SENTINEL: u32 = u32::MAX; /// Example: `RUST_LOG=runtime::revive=debug my_code --dev` const LOG_TARGET: &str = "runtime::revive"; -/// This version determines which syscalls are available to contracts. 
-/// -/// Needs to be bumped every time a versioned syscall is added. -const API_VERSION: u16 = 0; - -#[test] -fn api_version_up_to_date() { - assert!( - API_VERSION == crate::wasm::HIGHEST_API_VERSION, - "A new versioned API has been added. The `API_VERSION` needs to be bumped." - ); -} - #[frame_support::pallet] pub mod pallet { use super::*; @@ -623,14 +610,6 @@ pub mod pallet { #[pallet::storage] pub(crate) type AddressSuffix = StorageMap<_, Identity, H160, [u8; 12]>; - #[pallet::extra_constants] - impl Pallet { - #[pallet::constant_name(ApiVersion)] - fn api_version() -> u16 { - API_VERSION - } - } - #[pallet::hooks] impl Hooks> for Pallet { fn on_idle(_block: BlockNumberFor, limit: Weight) -> Weight { @@ -768,7 +747,7 @@ pub mod pallet { /// /// # Parameters /// - /// * `payload`: The RLP-encoded [`crate::evm::TransactionLegacySigned`]. + /// * `payload`: The encoded [`crate::evm::TransactionSigned`]. /// * `gas_limit`: The gas limit enforced during contract execution. /// * `storage_deposit_limit`: The maximum balance that can be charged to the caller for /// storage usage. @@ -823,7 +802,7 @@ pub mod pallet { dest, value, gas_limit, - storage_deposit_limit, + DepositLimit::Balance(storage_deposit_limit), data, DebugInfo::Skip, CollectEvents::Skip, @@ -859,7 +838,7 @@ pub mod pallet { origin, value, gas_limit, - storage_deposit_limit, + DepositLimit::Balance(storage_deposit_limit), Code::Existing(code_hash), data, salt, @@ -925,7 +904,7 @@ pub mod pallet { origin, value, gas_limit, - storage_deposit_limit, + DepositLimit::Balance(storage_deposit_limit), Code::Upload(code), data, salt, @@ -1083,7 +1062,7 @@ fn dispatch_result( impl Pallet where - BalanceOf: Into + TryFrom, + BalanceOf: Into + TryFrom + Bounded, MomentOf: Into, T::Hash: frame_support::traits::IsType, { @@ -1098,7 +1077,7 @@ where dest: H160, value: BalanceOf, gas_limit: Weight, - storage_deposit_limit: BalanceOf, + storage_deposit_limit: DepositLimit>, data: Vec, debug: DebugInfo, collect_events: CollectEvents, @@ -1112,7 +1091,10 @@ where }; let try_call = || { let origin = Origin::from_runtime_origin(origin)?; - let mut storage_meter = StorageMeter::new(&origin, storage_deposit_limit, value)?; + let mut storage_meter = match storage_deposit_limit { + DepositLimit::Balance(limit) => StorageMeter::new(&origin, limit, value)?, + DepositLimit::Unchecked => StorageMeter::new_unchecked(BalanceOf::::max_value()), + }; let result = ExecStack::>::run_call( origin.clone(), dest, @@ -1120,9 +1102,14 @@ where &mut storage_meter, Self::convert_native_to_evm(value), data, + storage_deposit_limit.is_unchecked(), debug_message.as_mut(), )?; - storage_deposit = storage_meter.try_into_deposit(&origin)?; + storage_deposit = storage_meter + .try_into_deposit(&origin, storage_deposit_limit.is_unchecked()) + .inspect_err(|err| { + log::error!(target: LOG_TARGET, "Failed to transfer deposit: {err:?}"); + })?; Ok(result) }; let result = Self::run_guarded(try_call); @@ -1151,7 +1138,7 @@ where origin: OriginFor, value: BalanceOf, gas_limit: Weight, - mut storage_deposit_limit: BalanceOf, + storage_deposit_limit: DepositLimit>, code: Code, data: Vec, salt: Option<[u8; 32]>, @@ -1162,13 +1149,24 @@ where let mut storage_deposit = Default::default(); let mut debug_message = if debug == DebugInfo::UnsafeDebug { Some(DebugBuffer::default()) } else { None }; + + let unchecked_deposit_limit = storage_deposit_limit.is_unchecked(); + let mut storage_deposit_limit = match storage_deposit_limit { + DepositLimit::Balance(limit) => limit, + 
DepositLimit::Unchecked => BalanceOf::::max_value(), + }; + let try_instantiate = || { let instantiate_account = T::InstantiateOrigin::ensure_origin(origin.clone())?; let (executable, upload_deposit) = match code { Code::Upload(code) => { let upload_account = T::UploadOrigin::ensure_origin(origin)?; - let (executable, upload_deposit) = - Self::try_upload_code(upload_account, code, storage_deposit_limit)?; + let (executable, upload_deposit) = Self::try_upload_code( + upload_account, + code, + storage_deposit_limit, + unchecked_deposit_limit, + )?; storage_deposit_limit.saturating_reduce(upload_deposit); (executable, upload_deposit) }, @@ -1176,8 +1174,12 @@ where (WasmBlob::from_storage(code_hash, &mut gas_meter)?, Default::default()), }; let instantiate_origin = Origin::from_account_id(instantiate_account.clone()); - let mut storage_meter = - StorageMeter::new(&instantiate_origin, storage_deposit_limit, value)?; + let mut storage_meter = if unchecked_deposit_limit { + StorageMeter::new_unchecked(storage_deposit_limit) + } else { + StorageMeter::new(&instantiate_origin, storage_deposit_limit, value)? + }; + let result = ExecStack::>::run_instantiate( instantiate_account, executable, @@ -1186,10 +1188,11 @@ where Self::convert_native_to_evm(value), data, salt.as_ref(), + unchecked_deposit_limit, debug_message.as_mut(), ); storage_deposit = storage_meter - .try_into_deposit(&instantiate_origin)? + .try_into_deposit(&instantiate_origin, unchecked_deposit_limit)? .saturating_add(&StorageDeposit::Charge(upload_deposit)); result }; @@ -1215,28 +1218,15 @@ where /// /// # Parameters /// - /// - `origin`: The origin of the call. - /// - `dest`: The destination address of the call. - /// - `value`: The EVM value to transfer. - /// - `input`: The input data. + /// - `tx`: The Ethereum transaction to simulate. /// - `gas_limit`: The gas limit enforced during contract execution. - /// - `storage_deposit_limit`: The maximum balance that can be charged to the caller for storage - /// usage. /// - `utx_encoded_size`: A function that takes a call and returns the encoded size of the /// unchecked extrinsic. - /// - `debug`: Debugging configuration. - /// - `collect_events`: Event collection configuration. pub fn bare_eth_transact( - origin: T::AccountId, - dest: Option, - value: U256, - input: Vec, + mut tx: GenericTransaction, gas_limit: Weight, - storage_deposit_limit: BalanceOf, utx_encoded_size: impl Fn(Call) -> u32, - debug: DebugInfo, - collect_events: CollectEvents, - ) -> EthContractResult> + ) -> Result>, EthTransactError> where T: pallet_transaction_payment::Config, ::RuntimeCall: @@ -1247,26 +1237,58 @@ where T::Nonce: Into, T::Hash: frame_support::traits::IsType, { - log::debug!(target: LOG_TARGET, "bare_eth_transact: dest: {dest:?} value: {value:?} - gas_limit: {gas_limit:?} storage_deposit_limit: {storage_deposit_limit:?}"); + log::debug!(target: LOG_TARGET, "bare_eth_transact: tx: {tx:?} gas_limit: {gas_limit:?}"); + + let from = tx.from.unwrap_or_default(); + let origin = T::AddressMapper::to_account_id(&from); + + let storage_deposit_limit = if tx.gas.is_some() { + DepositLimit::Balance(BalanceOf::::max_value()) + } else { + DepositLimit::Unchecked + }; - // Get the nonce to encode in the tx. - let nonce: T::Nonce = >::account_nonce(&origin); + // TODO remove once we have revisited how we encode the gas limit. 
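(Editor's aside) The lines that follow back-fill any field the caller omitted (`nonce`, `gas_price`, `chain_id`) with chain-derived defaults before the dry run. A self-contained sketch of the same default-if-absent pattern, with toy types in place of `GenericTransaction`:

#[derive(Default, Debug)]
struct Tx {
    nonce: Option<u64>,
    gas_price: Option<u64>,
    chain_id: Option<u64>,
}

// Fill in whatever the caller left unset; values already present win.
fn fill_defaults(mut tx: Tx, account_nonce: u64, gas_price: u64, chain_id: u64) -> Tx {
    tx.nonce.get_or_insert(account_nonce);
    tx.gas_price.get_or_insert(gas_price);
    tx.chain_id.get_or_insert(chain_id);
    tx
}

fn main() {
    let tx = fill_defaults(Tx { nonce: Some(7), ..Default::default() }, 0, 1_000, 420);
    assert_eq!((tx.nonce, tx.gas_price, tx.chain_id), (Some(7), Some(1_000), Some(420)));
}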
+ if tx.nonce.is_none() { + tx.nonce = Some(>::account_nonce(&origin).into()); + } + if tx.gas_price.is_none() { + tx.gas_price = Some(GAS_PRICE.into()); + } + if tx.chain_id.is_none() { + tx.chain_id = Some(T::ChainId::get().into()); + } // Convert the value to the native balance type. - let native_value = match Self::convert_evm_to_native(value) { + let evm_value = tx.value.unwrap_or_default(); + let native_value = match Self::convert_evm_to_native(evm_value) { Ok(v) => v, - Err(err) => - return EthContractResult { - gas_required: Default::default(), - storage_deposit: Default::default(), - fee: Default::default(), - result: Err(err.into()), - }, + Err(_) => return Err(EthTransactError::Message("Failed to convert value".into())), + }; + + let input = tx.input.clone().unwrap_or_default().0; + let debug = DebugInfo::Skip; + let collect_events = CollectEvents::Skip; + + let extract_error = |err| { + if err == Error::::TransferFailed.into() || + err == Error::::StorageDepositNotEnoughFunds.into() || + err == Error::::StorageDepositLimitExhausted.into() + { + let balance = Self::evm_balance(&from); + return Err(EthTransactError::Message( + format!("insufficient funds for gas * price + value: address {from:?} have {balance} (supplied gas {})", + tx.gas.unwrap_or_default())) + ); + } + + return Err(EthTransactError::Message(format!( + "Failed to instantiate contract: {err:?}" + ))); }; // Dry run the call - let (mut result, dispatch_info) = match dest { + let (mut result, dispatch_info) = match tx.to { // A contract call. Some(dest) => { // Dry run the call. @@ -1281,11 +1303,24 @@ where collect_events, ); - let result = EthContractResult { + let data = match result.result { + Ok(return_value) => { + if return_value.did_revert() { + return Err(EthTransactError::Data(return_value.data)); + } + return_value.data + }, + Err(err) => { + log::debug!(target: LOG_TARGET, "Failed to execute call: {err:?}"); + return extract_error(err) + }, + }; + + let result = EthTransactInfo { gas_required: result.gas_required, storage_deposit: result.storage_deposit.charge_or_zero(), - result: result.result, - fee: Default::default(), + data, + eth_gas: Default::default(), }; // Get the dispatch info of the call. let dispatch_call: ::RuntimeCall = crate::Call::::call { @@ -1326,11 +1361,24 @@ where collect_events, ); - let result = EthContractResult { + let returned_data = match result.result { + Ok(return_value) => { + if return_value.result.did_revert() { + return Err(EthTransactError::Data(return_value.result.data)); + } + return_value.result.data + }, + Err(err) => { + log::debug!(target: LOG_TARGET, "Failed to instantiate: {err:?}"); + return extract_error(err) + }, + }; + + let result = EthTransactInfo { gas_required: result.gas_required, storage_deposit: result.storage_deposit.charge_or_zero(), - result: result.result.map(|v| v.result), - fee: Default::default(), + data: returned_data, + eth_gas: Default::default(), }; // Get the dispatch info of the call. @@ -1348,23 +1396,18 @@ where }, }; - let mut tx = TransactionLegacyUnsigned { - value, - input: input.into(), - nonce: nonce.into(), - chain_id: Some(T::ChainId::get().into()), - gas_price: GAS_PRICE.into(), - to: dest, - ..Default::default() - }; - // The transaction fees depend on the extrinsic's length, which in turn is influenced by // the encoded length of the gas limit specified in the transaction (tx.gas). // We iteratively compute the fee by adjusting tx.gas until the fee stabilizes. // with a maximum of 3 iterations to avoid an infinite loop. 
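+ // (Editor's aside) The loop below is a small fixed-point search: the fee depends on
+ // the extrinsic's encoded length, the encoded length depends on `tx.gas`, and
+ // `tx.gas` is in turn set to `fee / GAS_PRICE`. Each pass recomputes the fee with
+ // the latest gas value and breaks once the derived `eth_gas` stops changing; three
+ // passes are enough because the encoded width of the gas field can only step up a
+ // bounded number of times.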
for _ in 0..3 { + let Ok(unsigned_tx) = tx.clone().try_into_unsigned() else { + log::debug!(target: LOG_TARGET, "Failed to convert to unsigned"); + return Err(EthTransactError::Message("Invalid transaction".into())); + }; + let eth_dispatch_call = crate::Call::::eth_transact { - payload: tx.dummy_signed_payload(), + payload: unsigned_tx.dummy_signed_payload(), gas_limit: result.gas_required, storage_deposit_limit: result.storage_deposit, }; @@ -1375,17 +1418,18 @@ where 0u32.into(), ) .into(); + let eth_gas: U256 = (fee / GAS_PRICE.into()).into(); - if fee == result.fee { - log::trace!(target: LOG_TARGET, "bare_eth_call: encoded_len: {encoded_len:?} fee: {fee:?}"); + if eth_gas == result.eth_gas { + log::trace!(target: LOG_TARGET, "bare_eth_call: encoded_len: {encoded_len:?} eth_gas: {eth_gas:?}"); break; } - result.fee = fee; - tx.gas = (fee / GAS_PRICE.into()).into(); - log::debug!(target: LOG_TARGET, "Adjusting Eth gas to: {:?}", tx.gas); + result.eth_gas = eth_gas; + tx.gas = Some(eth_gas.into()); + log::debug!(target: LOG_TARGET, "Adjusting Eth gas to: {eth_gas:?}"); } - result + Ok(result) } /// Get the balance with EVM decimals of the given `address`. @@ -1403,7 +1447,7 @@ where storage_deposit_limit: BalanceOf, ) -> CodeUploadResult> { let origin = T::UploadOrigin::ensure_origin(origin)?; - let (module, deposit) = Self::try_upload_code(origin, code, storage_deposit_limit)?; + let (module, deposit) = Self::try_upload_code(origin, code, storage_deposit_limit, false)?; Ok(CodeUploadReturnValue { code_hash: *module.code_hash(), deposit }) } @@ -1421,9 +1465,10 @@ where origin: T::AccountId, code: Vec, storage_deposit_limit: BalanceOf, + skip_transfer: bool, ) -> Result<(WasmBlob, BalanceOf), DispatchError> { let mut module = WasmBlob::from_code(code, origin)?; - let deposit = module.store_code()?; + let deposit = module.store_code(skip_transfer)?; ensure!(storage_deposit_limit >= deposit, >::StorageDepositLimitExhausted); Ok((module, deposit)) } @@ -1527,14 +1572,7 @@ sp_api::decl_runtime_apis! { /// Perform an Ethereum call. /// /// See [`crate::Pallet::bare_eth_transact`] - fn eth_transact( - origin: H160, - dest: Option, - value: U256, - input: Vec, - gas_limit: Option, - storage_deposit_limit: Option, - ) -> EthContractResult; + fn eth_transact(tx: GenericTransaction) -> Result, EthTransactError>; /// Upload new code without instantiating a contract from it. /// diff --git a/substrate/frame/revive/src/limits.rs b/substrate/frame/revive/src/limits.rs index 64e66382b9ab..2e112baae301 100644 --- a/substrate/frame/revive/src/limits.rs +++ b/substrate/frame/revive/src/limits.rs @@ -116,7 +116,10 @@ pub mod code { const BASIC_BLOCK_SIZE: u32 = 1000; /// Make sure that the various program parts are within the defined limits. - pub fn enforce(blob: Vec) -> Result { + pub fn enforce( + blob: Vec, + available_syscalls: &[&[u8]], + ) -> Result { fn round_page(n: u32) -> u64 { // performing the rounding in u64 in order to prevent overflow u64::from(n).next_multiple_of(PAGE_SIZE.into()) @@ -129,23 +132,56 @@ pub mod code { Error::::CodeRejected })?; + if !program.is_64_bit() { + log::debug!(target: LOG_TARGET, "32bit programs are not supported."); + Err(Error::::CodeRejected)?; + } + + // Need to check that no non-existent syscalls are used. This allows us to add + // new syscalls later without affecting already deployed code. + for (idx, import) in program.imports().iter().enumerate() { + // We are being defensive in case an attacker is able to somehow include + // a lot of imports. 
This is important because we search the array of host + // functions for every import. + if idx == available_syscalls.len() { + log::debug!(target: LOG_TARGET, "Program contains too many imports."); + Err(Error::::CodeRejected)?; + } + let Some(import) = import else { + log::debug!(target: LOG_TARGET, "Program contains malformed import."); + return Err(Error::::CodeRejected.into()); + }; + if !available_syscalls.contains(&import.as_bytes()) { + log::debug!(target: LOG_TARGET, "Program references unknown syscall: {}", import); + Err(Error::::CodeRejected)?; + } + } + // This scans the whole program but we only do it once on code deployment. // It is safe to do unchecked math in u32 because the size of the program // was already checked above. - use polkavm::program::ISA32_V1_NoSbrk as ISA; + use polkavm::program::ISA64_V1 as ISA; let mut num_instructions: u32 = 0; let mut max_basic_block_size: u32 = 0; let mut basic_block_size: u32 = 0; for inst in program.instructions(ISA) { + use polkavm::program::Instruction; num_instructions += 1; basic_block_size += 1; if inst.kind.opcode().starts_new_basic_block() { max_basic_block_size = max_basic_block_size.max(basic_block_size); basic_block_size = 0; } - if matches!(inst.kind, polkavm::program::Instruction::invalid) { - log::debug!(target: LOG_TARGET, "invalid instruction at offset {}", inst.offset); - return Err(>::InvalidInstruction.into()) + match inst.kind { + Instruction::invalid => { + log::debug!(target: LOG_TARGET, "invalid instruction at offset {}", inst.offset); + return Err(>::InvalidInstruction.into()) + }, + Instruction::sbrk(_, _) => { + log::debug!(target: LOG_TARGET, "sbrk instruction is not allowed. offset {}", inst.offset); + return Err(>::InvalidInstruction.into()) + }, + _ => (), } } diff --git a/substrate/frame/revive/src/primitives.rs b/substrate/frame/revive/src/primitives.rs index 024b1f3448e1..a7127f812b4b 100644 --- a/substrate/frame/revive/src/primitives.rs +++ b/substrate/frame/revive/src/primitives.rs @@ -17,8 +17,8 @@ //! A crate that hosts a common definitions that are relevant for the pallet-revive. -use crate::H160; -use alloc::vec::Vec; +use crate::{H160, U256}; +use alloc::{string::String, vec::Vec}; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::weights::Weight; use pallet_revive_uapi::ReturnFlags; @@ -28,6 +28,30 @@ use sp_runtime::{ DispatchError, RuntimeDebug, }; +#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] +pub enum DepositLimit { + /// Allows bypassing all balance transfer checks. + Unchecked, + + /// Specifies a maximum allowable balance for a deposit. + Balance(Balance), +} + +impl DepositLimit { + pub fn is_unchecked(&self) -> bool { + match self { + Self::Unchecked => true, + _ => false, + } + } +} + +impl From for DepositLimit { + fn from(value: T) -> Self { + Self::Balance(value) + } +} + /// Result type of a `bare_call` or `bare_instantiate` call as well as `ContractsApi::call` and /// `ContractsApi::instantiate`. /// @@ -84,15 +108,22 @@ pub struct ContractResult { /// The result of the execution of a `eth_transact` call. #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] -pub struct EthContractResult> { - /// The fee charged for the execution. - pub fee: Balance, +pub struct EthTransactInfo { /// The amount of gas that was necessary to execute the transaction. pub gas_required: Weight, /// Storage deposit charged. pub storage_deposit: Balance, - /// The execution result. 
-	pub result: R,
+	/// The weight and deposit equivalent in EVM Gas.
+	pub eth_gas: U256,
+	/// The execution return value.
+	pub data: Vec<u8>,
+}
+
+/// Error type of an `eth_transact` call.
+#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)]
+pub enum EthTransactError {
+	Data(Vec<u8>),
+	Message(String),
 }
 
 /// Result type of a `bare_code_upload` call.
diff --git a/substrate/frame/revive/src/storage/meter.rs b/substrate/frame/revive/src/storage/meter.rs
index 712010bc8257..6eddf048be98 100644
--- a/substrate/frame/revive/src/storage/meter.rs
+++ b/substrate/frame/revive/src/storage/meter.rs
@@ -373,24 +373,36 @@ where
 		}
 	}
 
+	/// Create new storage meter without checking the limit.
+	pub fn new_unchecked(limit: BalanceOf<T>) -> Self {
+		return Self { limit, ..Default::default() }
+	}
+
 	/// The total amount of deposit that should change hands as result of the execution
 	/// that this meter was passed into. This will also perform all the charges accumulated
 	/// in the whole contract stack.
 	///
 	/// This drops the root meter in order to make sure it is only called when the whole
 	/// execution did finish.
-	pub fn try_into_deposit(self, origin: &Origin) -> Result<DepositOf<T>, DispatchError> {
-		// Only refund or charge deposit if the origin is not root.
-		let origin = match origin {
-			Origin::Root => return Ok(Deposit::Charge(Zero::zero())),
-			Origin::Signed(o) => o,
-		};
-		for charge in self.charges.iter().filter(|c| matches!(c.amount, Deposit::Refund(_))) {
-			E::charge(origin, &charge.contract, &charge.amount, &charge.state)?;
-		}
-		for charge in self.charges.iter().filter(|c| matches!(c.amount, Deposit::Charge(_))) {
-			E::charge(origin, &charge.contract, &charge.amount, &charge.state)?;
+	pub fn try_into_deposit(
+		self,
+		origin: &Origin,
+		skip_transfer: bool,
+	) -> Result<DepositOf<T>, DispatchError> {
+		if !skip_transfer {
+			// Only refund or charge deposit if the origin is not root.
+			let origin = match origin {
+				Origin::Root => return Ok(Deposit::Charge(Zero::zero())),
+				Origin::Signed(o) => o,
+			};
+			for charge in self.charges.iter().filter(|c| matches!(c.amount, Deposit::Refund(_))) {
+				E::charge(origin, &charge.contract, &charge.amount, &charge.state)?;
+			}
+			for charge in self.charges.iter().filter(|c| matches!(c.amount, Deposit::Charge(_))) {
+				E::charge(origin, &charge.contract, &charge.amount, &charge.state)?;
+			}
 		}
+
 		Ok(self.total_deposit)
 	}
 }
@@ -425,13 +437,18 @@ impl<T: Config, E: Ext<T>> RawMeter<T, E, Nested> {
 		contract: &T::AccountId,
 		contract_info: &mut ContractInfo<T>,
 		code_info: &CodeInfo<T>,
+		skip_transfer: bool,
 	) -> Result<(), DispatchError> {
 		debug_assert!(matches!(self.contract_state(), ContractState::Alive));
 
 		// We need to make sure that the contract's account exists.
 		let ed = Pallet::<T>::min_balance();
 		self.total_deposit = Deposit::Charge(ed);
-		T::Currency::transfer(origin, contract, ed, Preservation::Preserve)?;
+		if skip_transfer {
+			T::Currency::set_balance(contract, ed);
+		} else {
+			T::Currency::transfer(origin, contract, ed, Preservation::Preserve)?;
+		}
 
 		// A consumer is added at account creation and removed it on termination, otherwise the
 		// runtime could remove the account. As long as a contract exists its account must exist.
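	// [Editor's sketch, not part of the diff] How the new pieces combine at a call
	// site: `DepositLimit::Balance` keeps the previous checked behaviour, while
	// `Unchecked` (paired with `skip_transfer` above) exists for fee-less dry runs.
	// Illustrative only; assumes the `DepositLimit` definition from this diff.
	fn deposit_limit_sketch() {
		let checked: DepositLimit<u64> = 1_000u64.into(); // via the blanket `From` impl
		assert!(!checked.is_unchecked());
		assert!(DepositLimit::<u64>::Unchecked.is_unchecked());
	}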
@@ -479,6 +496,7 @@ impl> RawMeter { } if let Deposit::Charge(amount) = total_deposit { if amount > self.limit { + log::debug!( target: LOG_TARGET, "Storage deposit limit exhausted: {:?} > {:?}", amount, self.limit); return Err(>::StorageDepositLimitExhausted.into()) } } @@ -811,7 +829,10 @@ mod tests { nested0.enforce_limit(Some(&mut nested0_info)).unwrap(); meter.absorb(nested0, &BOB, Some(&mut nested0_info)); - assert_eq!(meter.try_into_deposit(&test_case.origin).unwrap(), test_case.deposit); + assert_eq!( + meter.try_into_deposit(&test_case.origin, false).unwrap(), + test_case.deposit + ); assert_eq!(nested0_info.extra_deposit(), 112); assert_eq!(nested1_info.extra_deposit(), 110); @@ -882,7 +903,10 @@ mod tests { nested0.absorb(nested1, &CHARLIE, None); meter.absorb(nested0, &BOB, None); - assert_eq!(meter.try_into_deposit(&test_case.origin).unwrap(), test_case.deposit); + assert_eq!( + meter.try_into_deposit(&test_case.origin, false).unwrap(), + test_case.deposit + ); assert_eq!(TestExtTestValue::get(), test_case.expected) } } diff --git a/substrate/frame/revive/src/test_utils/builder.rs b/substrate/frame/revive/src/test_utils/builder.rs index e64f58894432..8ba5e7384070 100644 --- a/substrate/frame/revive/src/test_utils/builder.rs +++ b/substrate/frame/revive/src/test_utils/builder.rs @@ -18,7 +18,8 @@ use super::{deposit_limit, GAS_LIMIT}; use crate::{ address::AddressMapper, AccountIdOf, BalanceOf, Code, CollectEvents, Config, ContractResult, - DebugInfo, EventRecordOf, ExecReturnValue, InstantiateReturnValue, OriginFor, Pallet, Weight, + DebugInfo, DepositLimit, EventRecordOf, ExecReturnValue, InstantiateReturnValue, OriginFor, + Pallet, Weight, }; use frame_support::pallet_prelude::DispatchResultWithPostInfo; use paste::paste; @@ -133,7 +134,7 @@ builder!( origin: OriginFor, value: BalanceOf, gas_limit: Weight, - storage_deposit_limit: BalanceOf, + storage_deposit_limit: DepositLimit>, code: Code, data: Vec, salt: Option<[u8; 32]>, @@ -159,7 +160,7 @@ builder!( origin, value: 0u32.into(), gas_limit: GAS_LIMIT, - storage_deposit_limit: deposit_limit::(), + storage_deposit_limit: DepositLimit::Balance(deposit_limit::()), code, data: vec![], salt: Some([0; 32]), @@ -198,7 +199,7 @@ builder!( dest: H160, value: BalanceOf, gas_limit: Weight, - storage_deposit_limit: BalanceOf, + storage_deposit_limit: DepositLimit>, data: Vec, debug: DebugInfo, collect_events: CollectEvents, @@ -216,7 +217,7 @@ builder!( dest, value: 0u32.into(), gas_limit: GAS_LIMIT, - storage_deposit_limit: deposit_limit::(), + storage_deposit_limit: DepositLimit::Balance(deposit_limit::()), data: vec![], debug: DebugInfo::UnsafeDebug, collect_events: CollectEvents::Skip, diff --git a/substrate/frame/revive/src/tests.rs b/substrate/frame/revive/src/tests.rs index 177b8dff706b..27ec7948e8fd 100644 --- a/substrate/frame/revive/src/tests.rs +++ b/substrate/frame/revive/src/tests.rs @@ -29,6 +29,7 @@ use crate::{ ChainExtension, Environment, Ext, RegisteredChainExtension, Result as ExtensionResult, RetVal, ReturnFlags, }, + evm::GenericTransaction, exec::Key, limits, primitives::CodeUploadReturnValue, @@ -38,8 +39,8 @@ use crate::{ wasm::Memory, weights::WeightInfo, AccountId32Mapper, BalanceOf, Code, CodeInfoOf, CollectEvents, Config, ContractInfo, - ContractInfoOf, DebugInfo, DeletionQueueCounter, Error, HoldReason, Origin, Pallet, - PristineCode, H160, + ContractInfoOf, DebugInfo, DeletionQueueCounter, DepositLimit, Error, EthTransactError, + HoldReason, Origin, Pallet, PristineCode, H160, }; use 
crate::test_utils::builder::Contract;
@@ -416,6 +417,7 @@ impl pallet_proxy::Config for Test {
 	type CallHasher = BlakeTwo256;
 	type AnnouncementDepositBase = ConstU64<1>;
 	type AnnouncementDepositFactor = ConstU64<1>;
+	type BlockNumberProvider = frame_system::Pallet<Test>;
 }
 
 parameter_types! {
@@ -1210,14 +1212,11 @@ fn delegate_call_with_deposit_limit() {
 		// Delegate call will write 1 storage and deposit of 2 (1 item) + 32 (bytes) is required.
 		// Fails, not enough deposit
-		assert_err!(
-			builder::bare_call(caller_addr)
-				.value(1337)
-				.data((callee_addr, 33u64).encode())
-				.build()
-				.result,
-			Error::<Test>::StorageDepositLimitExhausted,
-		);
+		let ret = builder::bare_call(caller_addr)
+			.value(1337)
+			.data((callee_addr, 33u64).encode())
+			.build_and_unwrap_result();
+		assert_return_code!(ret, RuntimeReturnCode::OutOfResources);
 
 		assert_ok!(builder::call(caller_addr)
 			.value(1337)
@@ -1248,7 +1247,7 @@ fn transfer_expendable_cannot_kill_account() {
 			test_utils::contract_info_storage_deposit(&addr)
 		);
 
-		// Some ot the total balance is held, so it can't be transferred.
+		// Some of the total balance is held, so it can't be transferred.
 		assert_err!(
 			<<Test as Config>::Currency as Mutate<AccountId32>>::transfer(
 				&account,
@@ -1677,8 +1676,8 @@ fn instantiate_return_code() {
 		// Contract has enough balance but the passed code hash is invalid
 		<Test as Config>::Currency::set_balance(&contract.account_id, min_balance + 10_000);
-		let result = builder::bare_call(contract.addr).data(vec![0; 33]).build_and_unwrap_result();
-		assert_return_code!(result, RuntimeReturnCode::CodeNotFound);
+		let result = builder::bare_call(contract.addr).data(vec![0; 33]).build();
+		assert_err!(result.result, <Error<Test>>::CodeNotFound);
 
 		// Contract has enough balance but callee reverts because "1" is passed.
 		let result = builder::bare_call(contract.addr)
@@ -2289,7 +2288,7 @@ fn gas_estimation_for_subcalls() {
 		// Make the same call using the estimated gas. Should succeed.
 		let result = builder::bare_call(addr_caller)
 			.gas_limit(result_orig.gas_required)
-			.storage_deposit_limit(result_orig.storage_deposit.charge_or_zero())
+			.storage_deposit_limit(result_orig.storage_deposit.charge_or_zero().into())
 			.data(input.clone())
 			.build();
 		assert_ok!(&result.result);
@@ -2297,7 +2296,7 @@
 		// Check that it fails with too little ref_time
 		let result = builder::bare_call(addr_caller)
 			.gas_limit(result_orig.gas_required.sub_ref_time(1))
-			.storage_deposit_limit(result_orig.storage_deposit.charge_or_zero())
+			.storage_deposit_limit(result_orig.storage_deposit.charge_or_zero().into())
 			.data(input.clone())
 			.build();
 		assert_err!(result.result, error);
@@ -2305,7 +2304,7 @@
 		// Check that it fails with too little proof_size
 		let result = builder::bare_call(addr_caller)
 			.gas_limit(result_orig.gas_required.sub_proof_size(1))
-			.storage_deposit_limit(result_orig.storage_deposit.charge_or_zero())
+			.storage_deposit_limit(result_orig.storage_deposit.charge_or_zero().into())
 			.data(input.clone())
 			.build();
 		assert_err!(result.result, error);
@@ -3462,13 +3461,11 @@ fn deposit_limit_in_nested_calls() {
 		// nested call. This should fail as callee adds up 2 bytes to the storage, meaning
 		// that the nested call should have a deposit limit of at least 2 Balance. The
 		// sub-call should be rolled back, which is covered by the next test case.
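		// [Editor's sketch, not part of the diff] Why the assertions here move from
		// `StorageDepositLimitExhausted` to `OutOfResources`: the reworked
		// `exec_error_into_return_code` (see runtime.rs later in this diff) maps
		// resource exhaustion *inside the callee* to a return code the caller can
		// handle instead of trapping it. Illustrative enums, not the real types:
		enum ErrKind { TransferFailed, OutOfGas, OutOfDeposit, Other }
		enum ErrOrigin { Caller, Callee }
		fn to_return_code_sketch(err: ErrKind, origin: ErrOrigin) -> Result<&'static str, ErrKind> {
			match (err, origin) {
				(ErrKind::TransferFailed, _) => Ok("TransferFailed"),
				(ErrKind::OutOfGas | ErrKind::OutOfDeposit, ErrOrigin::Callee) => Ok("OutOfResources"),
				(_, ErrOrigin::Callee) => Ok("CalleeTrapped"),
				(err, ErrOrigin::Caller) => Err(err), // the caller's own failure still traps
			}
		}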
- assert_err_ignore_postinfo!( - builder::call(addr_caller) - .storage_deposit_limit(16) - .data((102u32, &addr_callee, U256::from(1u64)).encode()) - .build(), - >::StorageDepositLimitExhausted, - ); + let ret = builder::bare_call(addr_caller) + .storage_deposit_limit(DepositLimit::Balance(16)) + .data((102u32, &addr_callee, U256::from(1u64)).encode()) + .build_and_unwrap_result(); + assert_return_code!(ret, RuntimeReturnCode::OutOfResources); // Refund in the callee contract but not enough to cover the 14 Balance required by the // caller. Note that if previous sub-call wouldn't roll back, this call would pass @@ -3484,13 +3481,11 @@ fn deposit_limit_in_nested_calls() { let _ = ::Currency::set_balance(&ALICE, 511); // Require more than the sender's balance. - // We don't set a special limit for the nested call. - assert_err_ignore_postinfo!( - builder::call(addr_caller) - .data((512u32, &addr_callee, U256::from(1u64)).encode()) - .build(), - >::StorageDepositLimitExhausted, - ); + // Limit the sub call to little balance so it should fail in there + let ret = builder::bare_call(addr_caller) + .data((512u32, &addr_callee, U256::from(1u64)).encode()) + .build_and_unwrap_result(); + assert_return_code!(ret, RuntimeReturnCode::OutOfResources); // Same as above but allow for the additional deposit of 1 Balance in parent. // We set the special deposit limit of 1 Balance for the nested call, which isn't @@ -3562,14 +3557,12 @@ fn deposit_limit_in_nested_instantiate() { // Now we set enough limit in parent call, but an insufficient limit for child // instantiate. This should fail during the charging for the instantiation in // `RawMeter::charge_instantiate()` - assert_err_ignore_postinfo!( - builder::call(addr_caller) - .origin(RuntimeOrigin::signed(BOB)) - .storage_deposit_limit(callee_info_len + 2 + ED + 2) - .data((0u32, &code_hash_callee, U256::from(callee_info_len + 2 + ED + 1)).encode()) - .build(), - >::StorageDepositLimitExhausted, - ); + let ret = builder::bare_call(addr_caller) + .origin(RuntimeOrigin::signed(BOB)) + .storage_deposit_limit(DepositLimit::Balance(callee_info_len + 2 + ED + 2)) + .data((0u32, &code_hash_callee, U256::from(callee_info_len + 2 + ED + 1)).encode()) + .build_and_unwrap_result(); + assert_return_code!(ret, RuntimeReturnCode::OutOfResources); // The charges made on the instantiation should be rolled back. assert_eq!(::Currency::free_balance(&BOB), 1_000_000); @@ -3577,21 +3570,19 @@ fn deposit_limit_in_nested_instantiate() { // item of 1 byte to be covered by the limit, which implies 3 more Balance. // Now we set enough limit for the parent call, but insufficient limit for child // instantiate. This should fail right after the constructor execution. - assert_err_ignore_postinfo!( - builder::call(addr_caller) - .origin(RuntimeOrigin::signed(BOB)) - .storage_deposit_limit(callee_info_len + 2 + ED + 3) // enough parent limit - .data((1u32, &code_hash_callee, U256::from(callee_info_len + 2 + ED + 2)).encode()) - .build(), - >::StorageDepositLimitExhausted, - ); + let ret = builder::bare_call(addr_caller) + .origin(RuntimeOrigin::signed(BOB)) + .storage_deposit_limit(DepositLimit::Balance(callee_info_len + 2 + ED + 3)) // enough parent limit + .data((1u32, &code_hash_callee, U256::from(callee_info_len + 2 + ED + 2)).encode()) + .build_and_unwrap_result(); + assert_return_code!(ret, RuntimeReturnCode::OutOfResources); // The charges made on the instantiation should be rolled back. 
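		// [Editor's note, sketch only] The arithmetic these cases rely on: the nested
		// instantiate must be left enough limit to cover the callee's code info deposit,
		// one storage item (2 Balance) plus the ED for its new account, plus whatever
		// bytes it writes; one unit short fails with `OutOfResources` while the parent
		// rolls back everything already charged.
		fn nested_limit_suffices(child_limit: u64, callee_info_len: u64, ed: u64, extra_bytes: u64) -> bool {
			child_limit >= callee_info_len + 2 + ed + extra_bytes
		}
		// (the free-balance check below asserts exactly that rollback:)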
assert_eq!(::Currency::free_balance(&BOB), 1_000_000); // Set enough deposit limit for the child instantiate. This should succeed. let result = builder::bare_call(addr_caller) .origin(RuntimeOrigin::signed(BOB)) - .storage_deposit_limit(callee_info_len + 2 + ED + 4 + 2) + .storage_deposit_limit((callee_info_len + 2 + ED + 4 + 2).into()) .data((1u32, &code_hash_callee, U256::from(callee_info_len + 2 + ED + 3 + 2)).encode()) .build(); @@ -3878,7 +3869,7 @@ fn locking_delegate_dependency_works() { // Locking a dependency with a storage limit too low should fail. assert_err!( builder::bare_call(addr_caller) - .storage_deposit_limit(dependency_deposit - 1) + .storage_deposit_limit((dependency_deposit - 1).into()) .data((1u32, hash2addr(&callee_hashes[0]), callee_hashes[0]).encode()) .build() .result, @@ -3889,7 +3880,7 @@ fn locking_delegate_dependency_works() { assert_ok!(Contracts::remove_code(RuntimeOrigin::signed(ALICE_FALLBACK), callee_hashes[0])); // Calling should fail since the delegated contract is not on chain anymore. - assert_err!(call(&addr_caller, &noop_input).result, Error::::ContractTrapped); + assert_err!(call(&addr_caller, &noop_input).result, Error::::CodeNotFound); // Add the dependency back. Contracts::upload_code( @@ -4351,6 +4342,28 @@ fn create1_with_value_works() { }); } +#[test] +fn call_data_size_api_works() { + let (code, _) = compile_module("call_data_size").unwrap(); + + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + + // Create fixture: Constructor does nothing + let Contract { addr, .. } = + builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_contract(); + + // Call the contract: It echoes back the value returned by the call data size API. + let received = builder::bare_call(addr).build_and_unwrap_result(); + assert_eq!(received.flags, ReturnFlags::empty()); + assert_eq!(U256::from_little_endian(&received.data), U256::zero()); + + let received = builder::bare_call(addr).data(vec![1; 256]).build_and_unwrap_result(); + assert_eq!(received.flags, ReturnFlags::empty()); + assert_eq!(U256::from_little_endian(&received.data), U256::from(256)); + }); +} + #[test] fn static_data_limit_is_enforced() { let (oom_rw_trailing, _) = compile_module("oom_rw_trailing").unwrap(); @@ -4416,6 +4429,48 @@ fn chain_id_works() { }); } +#[test] +fn call_data_load_api_works() { + let (code, _) = compile_module("call_data_load").unwrap(); + + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + + // Create fixture: Constructor does nothing + let Contract { addr, .. } = + builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_contract(); + + // Call the contract: It reads a byte for the offset and then returns + // what call data load returned using this byte as the offset. 
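		// [Editor's sketch, not part of the diff] The host-side semantics this test
		// exercises, mirroring the `call_data_load` implementation added later in this
		// diff: read up to 32 bytes starting at `offset`, zero-fill the remainder (any
		// offset is valid, out-of-bounds reads yield zero), then reverse, treating
		// calldata as big-endian while the fixed 32-byte output is little-endian.
		fn call_data_load_sketch(input: &[u8], offset: usize) -> [u8; 32] {
			let mut data = [0u8; 32];
			if offset < input.len() {
				let end = offset.saturating_add(32).min(input.len());
				data[..end - offset].copy_from_slice(&input[offset..end]);
			}
			data.reverse();
			data
		}
		// e.g. a one-byte tag followed by a big-endian U256(255): offset 2 skips the tag
		// plus one leading zero byte, so the little-endian result reads as 255 << 8 = 65280.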
+ let input = (3u8, U256::max_value(), U256::max_value()).encode(); + let received = builder::bare_call(addr).data(input).build().result.unwrap(); + assert_eq!(received.flags, ReturnFlags::empty()); + assert_eq!(U256::from_little_endian(&received.data), U256::max_value()); + + // Edge case + let input = (2u8, U256::from(255).to_big_endian()).encode(); + let received = builder::bare_call(addr).data(input).build().result.unwrap(); + assert_eq!(received.flags, ReturnFlags::empty()); + assert_eq!(U256::from_little_endian(&received.data), U256::from(65280)); + + // Edge case + let received = builder::bare_call(addr).data(vec![1]).build().result.unwrap(); + assert_eq!(received.flags, ReturnFlags::empty()); + assert_eq!(U256::from_little_endian(&received.data), U256::zero()); + + // OOB case + let input = (42u8).encode(); + let received = builder::bare_call(addr).data(input).build().result.unwrap(); + assert_eq!(received.flags, ReturnFlags::empty()); + assert_eq!(U256::from_little_endian(&received.data), U256::zero()); + + // No calldata should return the zero value + let received = builder::bare_call(addr).build().result.unwrap(); + assert_eq!(received.flags, ReturnFlags::empty()); + assert_eq!(U256::from_little_endian(&received.data), U256::zero()); + }); +} + #[test] fn return_data_api_works() { let (code_return_data_api, _) = compile_module("return_data_api").unwrap(); @@ -4665,3 +4720,132 @@ fn mapped_address_works() { assert_eq!(::Currency::total_balance(&EVE), 1_100); }); } + +#[test] +fn skip_transfer_works() { + let (code_caller, _) = compile_module("call").unwrap(); + let (code, _) = compile_module("set_empty_storage").unwrap(); + + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + ::Currency::set_balance(&ALICE, 1_000_000); + ::Currency::set_balance(&BOB, 0); + + // fails to instantiate when gas is specified. + assert_err!( + Pallet::::bare_eth_transact( + GenericTransaction { + from: Some(BOB_ADDR), + input: Some(code.clone().into()), + gas: Some(1u32.into()), + ..Default::default() + }, + Weight::MAX, + |_| 0u32 + ), + EthTransactError::Message(format!( + "insufficient funds for gas * price + value: address {BOB_ADDR:?} have 0 (supplied gas 1)" + )) + ); + + // works when no gas is specified. + assert_ok!(Pallet::::bare_eth_transact( + GenericTransaction { + from: Some(ALICE_ADDR), + input: Some(code.clone().into()), + ..Default::default() + }, + Weight::MAX, + |_| 0u32 + )); + + let Contract { addr, .. } = + builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_contract(); + + let Contract { addr: caller_addr, .. } = + builder::bare_instantiate(Code::Upload(code_caller)).build_and_unwrap_contract(); + + // fails to call when gas is specified. + assert_err!( + Pallet::::bare_eth_transact( + GenericTransaction { + from: Some(BOB_ADDR), + to: Some(addr), + gas: Some(1u32.into()), + ..Default::default() + }, + Weight::MAX, + |_| 0u32 + ), + EthTransactError::Message(format!( + "insufficient funds for gas * price + value: address {BOB_ADDR:?} have 0 (supplied gas 1)" + )) + ); + + // fails when calling from a contract when gas is specified. + assert_err!( + Pallet::::bare_eth_transact( + GenericTransaction { + from: Some(BOB_ADDR), + to: Some(caller_addr), + input: Some((0u32, &addr).encode().into()), + gas: Some(1u32.into()), + ..Default::default() + }, + Weight::MAX, + |_| 0u32 + ), + EthTransactError::Message(format!("insufficient funds for gas * price + value: address {BOB_ADDR:?} have 0 (supplied gas 1)")) + ); + + // works when no gas is specified. 
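	// [Editor's sketch, not part of the diff] The rule `skip_transfer_works` pins
	// down, written as a predicate: an `eth_transact` dry run only enforces the
	// "gas * price + value" balance check when the transaction pre-specifies `gas`;
	// with no `gas`, resources are being estimated and the transfers are skipped.
	fn dry_run_checks_balance(gas: Option<u64>) -> bool {
		gas.is_some()
	}
	// (the `assert_ok!` cases below are the no-gas variants:)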
+ assert_ok!(Pallet::::bare_eth_transact( + GenericTransaction { from: Some(BOB_ADDR), to: Some(addr), ..Default::default() }, + Weight::MAX, + |_| 0u32 + )); + + // works when calling from a contract when no gas is specified. + assert_ok!(Pallet::::bare_eth_transact( + GenericTransaction { + from: Some(BOB_ADDR), + to: Some(caller_addr), + input: Some((0u32, &addr).encode().into()), + ..Default::default() + }, + Weight::MAX, + |_| 0u32 + )); + }); +} + +#[test] +fn unknown_syscall_rejected() { + let (code, _) = compile_module("unknown_syscall").unwrap(); + + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + ::Currency::set_balance(&ALICE, 1_000_000); + + assert_err!( + builder::bare_instantiate(Code::Upload(code)).build().result, + >::CodeRejected, + ) + }); +} + +#[test] +fn unstable_interface_rejected() { + let (code, _) = compile_module("unstable_interface").unwrap(); + + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + ::Currency::set_balance(&ALICE, 1_000_000); + + Test::set_unstable_interface(false); + assert_err!( + builder::bare_instantiate(Code::Upload(code.clone())).build().result, + >::CodeRejected, + ); + + Test::set_unstable_interface(true); + assert_ok!(builder::bare_instantiate(Code::Upload(code)).build().result); + }); +} diff --git a/substrate/frame/revive/src/tests/test_debug.rs b/substrate/frame/revive/src/tests/test_debug.rs index 7c4fbba71f65..c9e19e52ace1 100644 --- a/substrate/frame/revive/src/tests/test_debug.rs +++ b/substrate/frame/revive/src/tests/test_debug.rs @@ -21,6 +21,7 @@ use crate::{ debug::{CallInterceptor, CallSpan, ExecResult, ExportedFunction, Tracing}, primitives::ExecReturnValue, test_utils::*, + DepositLimit, }; use frame_support::traits::Currency; use pretty_assertions::assert_eq; @@ -114,7 +115,7 @@ fn debugging_works() { RuntimeOrigin::signed(ALICE), 0, GAS_LIMIT, - deposit_limit::(), + DepositLimit::Balance(deposit_limit::()), Code::Upload(wasm), vec![], Some([0u8; 32]), @@ -198,7 +199,7 @@ fn call_interception_works() { RuntimeOrigin::signed(ALICE), 0, GAS_LIMIT, - deposit_limit::(), + deposit_limit::().into(), Code::Upload(wasm), vec![], // some salt to ensure that the address of this contract is unique among all tests diff --git a/substrate/frame/revive/src/wasm/mod.rs b/substrate/frame/revive/src/wasm/mod.rs index f10c4f5fddf8..e963895dafae 100644 --- a/substrate/frame/revive/src/wasm/mod.rs +++ b/substrate/frame/revive/src/wasm/mod.rs @@ -23,13 +23,10 @@ mod runtime; #[cfg(doc)] pub use crate::wasm::runtime::SyscallDoc; -#[cfg(test)] -pub use runtime::HIGHEST_API_VERSION; - #[cfg(feature = "runtime-benchmarks")] pub use crate::wasm::runtime::{ReturnData, TrapReason}; -pub use crate::wasm::runtime::{ApiVersion, Memory, Runtime, RuntimeCosts}; +pub use crate::wasm::runtime::{Memory, Runtime, RuntimeCosts}; use crate::{ address::AddressMapper, @@ -39,7 +36,7 @@ use crate::{ storage::meter::Diff, weights::WeightInfo, AccountIdOf, BadOrigin, BalanceOf, CodeInfoOf, CodeVec, Config, Error, Event, ExecError, - HoldReason, Pallet, PristineCode, Weight, API_VERSION, LOG_TARGET, + HoldReason, Pallet, PristineCode, Weight, LOG_TARGET, }; use alloc::vec::Vec; use codec::{Decode, Encode, MaxEncodedLen}; @@ -87,11 +84,6 @@ pub struct CodeInfo { refcount: u64, /// Length of the code in bytes. code_len: u32, - /// The API version that this contract operates under. - /// - /// This determines which host functions are available to the contract. 
This - /// prevents that new host functions become available to already deployed contracts. - api_version: u16, /// The behaviour version that this contract operates under. /// /// Whenever any observeable change (with the exception of weights) are made we need @@ -99,7 +91,7 @@ pub struct CodeInfo { /// exposing the old behaviour depending on the set behaviour version of the contract. /// /// As of right now this is a reserved field that is always set to 0. - behaviour_version: u16, + behaviour_version: u32, } impl ExportedFunction { @@ -130,9 +122,10 @@ where { /// We only check for size and nothing else when the code is uploaded. pub fn from_code(code: Vec, owner: AccountIdOf) -> Result { - // We do size checks when new code is deployed. This allows us to increase + // We do validation only when new code is deployed. This allows us to increase // the limits later without affecting already deployed code. - let code = limits::code::enforce::(code)?; + let available_syscalls = runtime::list_syscalls(T::UnsafeUnstableInterface::get()); + let code = limits::code::enforce::(code, available_syscalls)?; let code_len = code.len() as u32; let bytes_added = code_len.saturating_add(>::max_encoded_len() as u32); @@ -144,7 +137,6 @@ where deposit, refcount: 0, code_len, - api_version: API_VERSION, behaviour_version: Default::default(), }; let code_hash = H256(sp_io::hashing::keccak_256(&code)); @@ -183,7 +175,7 @@ where } /// Puts the module blob into storage, and returns the deposit collected for the storage. - pub fn store_code(&mut self) -> Result, Error> { + pub fn store_code(&mut self, skip_transfer: bool) -> Result, Error> { let code_hash = *self.code_hash(); >::mutate(code_hash, |stored_code_info| { match stored_code_info { @@ -195,15 +187,16 @@ where // the `owner` is always the origin of the current transaction. 
None => {
 					let deposit = self.code_info.deposit;
-					T::Currency::hold(
-						&HoldReason::CodeUploadDepositReserve.into(),
-						&self.code_info.owner,
-						deposit,
-					)
-					.map_err(|err| {
-						log::debug!(target: LOG_TARGET, "failed to store code for owner: {:?}: {err:?}", self.code_info.owner);
-						<Error<T>>::StorageDepositNotEnoughFunds
-					})?;
+
+					if !skip_transfer {
+						T::Currency::hold(
+							&HoldReason::CodeUploadDepositReserve.into(),
+							&self.code_info.owner,
+							deposit,
+						)
+						.map_err(|err| {
+							log::debug!(target: LOG_TARGET, "failed to store code for owner: {:?}: {err:?}", self.code_info.owner);
+							<Error<T>>::StorageDepositNotEnoughFunds
+						})?;
+					}
 
 					self.code_info.refcount = 0;
 					<PristineCode<T>>::insert(code_hash, &self.code);
@@ -229,7 +222,6 @@ impl<T: Config> CodeInfo<T> {
 			deposit: Default::default(),
 			refcount: 0,
 			code_len: 0,
-			api_version: API_VERSION,
 			behaviour_version: Default::default(),
 		}
 	}
@@ -259,7 +251,6 @@ pub struct PreparedCall<'a, E: Ext> {
 	module: polkavm::Module,
 	instance: polkavm::RawInstance,
 	runtime: Runtime<'a, E, polkavm::RawInstance>,
-	api_version: ApiVersion,
 }
 
 impl<'a, E: Ext> PreparedCall<'a, E>
@@ -270,12 +261,9 @@ where
 	pub fn call(mut self) -> ExecResult {
 		let exec_result = loop {
 			let interrupt = self.instance.run();
-			if let Some(exec_result) = self.runtime.handle_interrupt(
-				interrupt,
-				&self.module,
-				&mut self.instance,
-				self.api_version,
-			) {
+			if let Some(exec_result) =
+				self.runtime.handle_interrupt(interrupt, &self.module, &mut self.instance)
+			{
 				break exec_result
 			}
 		};
@@ -289,12 +277,18 @@ impl<T: Config> WasmBlob<T> {
 		self,
 		mut runtime: Runtime<E, polkavm::RawInstance>,
 		entry_point: ExportedFunction,
-		api_version: ApiVersion,
 	) -> Result<PreparedCall<E>, ExecError> {
 		let mut config = polkavm::Config::default();
 		config.set_backend(Some(polkavm::BackendKind::Interpreter));
-		let engine =
-			polkavm::Engine::new(&config).expect("interpreter is available on all plattforms; qed");
+		config.set_cache_enabled(false);
+		#[cfg(feature = "std")]
+		if std::env::var_os("REVIVE_USE_COMPILER").is_some() {
+			config.set_backend(Some(polkavm::BackendKind::Compiler));
+		}
+		let engine = polkavm::Engine::new(&config).expect(
+			"on-chain (no_std) use of interpreter is hard coded.
+			interpreter is available on all platforms; qed",
+		);
 
 		let mut module_config = polkavm::ModuleConfig::new();
 		module_config.set_page_size(limits::PAGE_SIZE);
@@ -306,6 +300,15 @@ impl<T: Config> WasmBlob<T> {
 			Error::<T>::CodeRejected
 		})?;
 
+		// This is checked at deploy time but we also want to reject pre-existing
+		// 32bit programs.
+		// TODO: Remove when we reset the test net.
+ // https://github.com/paritytech/contract-issues/issues/11 + if !module.is_64_bit() { + log::debug!(target: LOG_TARGET, "32bit programs are not supported."); + Err(Error::::CodeRejected)?; + } + let entry_program_counter = module .exports() .find(|export| export.symbol().as_bytes() == entry_point.identifier().as_bytes()) @@ -327,7 +330,7 @@ impl WasmBlob { instance.set_gas(gas_limit_polkavm); instance.prepare_call_untyped(entry_program_counter, &[]); - Ok(PreparedCall { module, instance, runtime, api_version }) + Ok(PreparedCall { module, instance, runtime }) } } @@ -348,13 +351,7 @@ where function: ExportedFunction, input_data: Vec, ) -> ExecResult { - let api_version = if ::UnsafeUnstableInterface::get() { - ApiVersion::UnsafeNewest - } else { - ApiVersion::Versioned(self.code_info.api_version) - }; - let prepared_call = - self.prepare_call(Runtime::new(ext, input_data), function, api_version)?; + let prepared_call = self.prepare_call(Runtime::new(ext, input_data), function)?; prepared_call.call() } diff --git a/substrate/frame/revive/src/wasm/runtime.rs b/substrate/frame/revive/src/wasm/runtime.rs index 3e2c83db1ebd..648a1621c198 100644 --- a/substrate/frame/revive/src/wasm/runtime.rs +++ b/substrate/frame/revive/src/wasm/runtime.rs @@ -27,7 +27,7 @@ use crate::{ Config, Error, LOG_TARGET, SENTINEL, }; use alloc::{boxed::Box, vec, vec::Vec}; -use codec::{Decode, DecodeLimit, Encode, MaxEncodedLen}; +use codec::{Decode, DecodeLimit, Encode}; use core::{fmt, marker::PhantomData, mem}; use frame_support::{ dispatch::DispatchInfo, ensure, pallet_prelude::DispatchResultWithPostInfo, parameter_types, @@ -44,14 +44,6 @@ type CallOf = ::RuntimeCall; /// The maximum nesting depth a contract can use when encoding types. const MAX_DECODE_NESTING: u32 = 256; -#[derive(Clone, Copy)] -pub enum ApiVersion { - /// Expose all APIs even unversioned ones. Only used for testing and benchmarking. - UnsafeNewest, - /// Only expose API's up to and including the specified version. - Versioned(u16), -} - /// Abstraction over the memory access within syscalls. /// /// The reason for this abstraction is that we run syscalls on the host machine when @@ -126,34 +118,13 @@ pub trait Memory { /// /// # Note /// - /// There must be an extra benchmark for determining the influence of `len` with - /// regard to the overall weight. + /// Make sure to charge a proportional amount of weight if `len` is not fixed. fn read_as_unbounded(&self, ptr: u32, len: u32) -> Result { let buf = self.read(ptr, len)?; let decoded = D::decode_all_with_depth_limit(MAX_DECODE_NESTING, &mut buf.as_ref()) .map_err(|_| DispatchError::from(Error::::DecodingFailed))?; Ok(decoded) } - - /// Reads and decodes a type with a size fixed at compile time from contract memory. - /// - /// # Only use on fixed size types - /// - /// Don't use this for types where the encoded size is not fixed but merely bounded. Otherwise - /// this implementation will out of bound access the buffer declared by the guest. Some examples - /// of those bounded but not fixed types: Enums with data, `BoundedVec` or any compact encoded - /// integer. - /// - /// # Note - /// - /// The weight of reading a fixed value is included in the overall weight of any - /// contract callable function. 
- fn read_as(&self, ptr: u32) -> Result { - let buf = self.read(ptr, D::max_encoded_len() as u32)?; - let decoded = D::decode_with_depth_limit(MAX_DECODE_NESTING, &mut buf.as_ref()) - .map_err(|_| DispatchError::from(Error::::DecodingFailed))?; - Ok(decoded) - } } /// Allows syscalls access to the PolkaVM instance they are executing in. @@ -164,8 +135,8 @@ pub trait Memory { pub trait PolkaVmInstance: Memory { fn gas(&self) -> polkavm::Gas; fn set_gas(&mut self, gas: polkavm::Gas); - fn read_input_regs(&self) -> (u32, u32, u32, u32, u32, u32); - fn write_output(&mut self, output: u32); + fn read_input_regs(&self) -> (u64, u64, u64, u64, u64, u64); + fn write_output(&mut self, output: u64); } // Memory implementation used in benchmarking where guest memory is mapped into the host. @@ -214,7 +185,7 @@ impl PolkaVmInstance for polkavm::RawInstance { self.set_gas(gas) } - fn read_input_regs(&self) -> (u32, u32, u32, u32, u32, u32) { + fn read_input_regs(&self) -> (u64, u64, u64, u64, u64, u64) { ( self.reg(polkavm::Reg::A0), self.reg(polkavm::Reg::A1), @@ -225,7 +196,7 @@ impl PolkaVmInstance for polkavm::RawInstance { ) } - fn write_output(&mut self, output: u32) { + fn write_output(&mut self, output: u64) { self.set_reg(polkavm::Reg::A0, output); } } @@ -296,8 +267,12 @@ pub enum RuntimeCosts { CopyFromContract(u32), /// Weight charged for copying data to the sandbox. CopyToContract(u32), + /// Weight of calling `seal_call_data_load``. + CallDataLoad, /// Weight of calling `seal_caller`. Caller, + /// Weight of calling `seal_call_data_size`. + CallDataSize, /// Weight of calling `seal_origin`. Origin, /// Weight of calling `seal_is_contract`. @@ -458,6 +433,8 @@ impl Token for RuntimeCosts { HostFn => cost_args!(noop_host_fn, 1), CopyToContract(len) => T::WeightInfo::seal_input(len), CopyFromContract(len) => T::WeightInfo::seal_return(len), + CallDataSize => T::WeightInfo::seal_call_data_size(), + CallDataLoad => T::WeightInfo::seal_call_data_load(), Caller => T::WeightInfo::seal_caller(), Origin => T::WeightInfo::seal_origin(), IsContract => T::WeightInfo::seal_is_contract(), @@ -572,7 +549,6 @@ impl<'a, E: Ext, M: PolkaVmInstance> Runtime<'a, E, M> { interrupt: Result, module: &polkavm::Module, instance: &mut M, - api_version: ApiVersion, ) -> Option { use polkavm::InterruptKind::*; @@ -592,7 +568,7 @@ impl<'a, E: Ext, M: PolkaVmInstance> Runtime<'a, E, M> { let Some(syscall_symbol) = module.imports().get(idx) else { return Some(Err(>::InvalidSyscall.into())); }; - match self.handle_ecall(instance, syscall_symbol.as_bytes(), api_version) { + match self.handle_ecall(instance, syscall_symbol.as_bytes()) { Ok(None) => None, Ok(Some(return_value)) => { instance.write_output(return_value); @@ -766,29 +742,24 @@ impl<'a, E: Ext, M: ?Sized + Memory> Runtime<'a, E, M> { Ok(()) } - /// Fallible conversion of `DispatchError` to `ReturnErrorCode`. - fn err_into_return_code(from: DispatchError) -> Result { - use ReturnErrorCode::*; - - let transfer_failed = Error::::TransferFailed.into(); - let no_code = Error::::CodeNotFound.into(); - let not_found = Error::::ContractNotFound.into(); - - match from { - x if x == transfer_failed => Ok(TransferFailed), - x if x == no_code => Ok(CodeNotFound), - x if x == not_found => Ok(NotCallable), - err => Err(err), - } - } - /// Fallible conversion of a `ExecError` to `ReturnErrorCode`. + /// + /// This is used when converting the error returned from a subcall in order to decide + /// whether to trap the caller or allow handling of the error. 
fn exec_error_into_return_code(from: ExecError) -> Result { use crate::exec::ErrorOrigin::Callee; + use ReturnErrorCode::*; + + let transfer_failed = Error::::TransferFailed.into(); + let out_of_gas = Error::::OutOfGas.into(); + let out_of_deposit = Error::::StorageDepositLimitExhausted.into(); + // errors in the callee do not trap the caller match (from.error, from.origin) { - (_, Callee) => Ok(ReturnErrorCode::CalleeTrapped), - (err, _) => Self::err_into_return_code(err), + (err, _) if err == transfer_failed => Ok(TransferFailed), + (err, Callee) if err == out_of_gas || err == out_of_deposit => Ok(OutOfResources), + (_, Callee) => Ok(CalleeTrapped), + (err, _) => Err(err), } } @@ -1153,14 +1124,18 @@ impl<'a, E: Ext, M: ?Sized + Memory> Runtime<'a, E, M> { #[define_env] pub mod env { /// Noop function used to benchmark the time it takes to execute an empty function. + /// + /// Marked as stable because it needs to be called from benchmarks even when the benchmarked + /// parachain has unstable functions disabled. #[cfg(feature = "runtime-benchmarks")] + #[stable] fn noop(&mut self, memory: &mut M) -> Result<(), TrapReason> { Ok(()) } /// Set the value at the given key in the contract storage. /// See [`pallet_revive_uapi::HostFn::set_storage_v2`] - #[api_version(0)] + #[stable] #[mutating] fn set_storage( &mut self, @@ -1174,23 +1149,9 @@ pub mod env { self.set_storage(memory, flags, key_ptr, key_len, value_ptr, value_len) } - /// Clear the value at the given key in the contract storage. - /// See [`pallet_revive_uapi::HostFn::clear_storage`] - #[api_version(0)] - #[mutating] - fn clear_storage( - &mut self, - memory: &mut M, - flags: u32, - key_ptr: u32, - key_len: u32, - ) -> Result { - self.clear_storage(memory, flags, key_ptr, key_len) - } - /// Retrieve the value under the given key from storage. /// See [`pallet_revive_uapi::HostFn::get_storage`] - #[api_version(0)] + #[stable] fn get_storage( &mut self, memory: &mut M, @@ -1203,38 +1164,9 @@ pub mod env { self.get_storage(memory, flags, key_ptr, key_len, out_ptr, out_len_ptr) } - /// Checks whether there is a value stored under the given key. - /// See [`pallet_revive_uapi::HostFn::contains_storage`] - #[api_version(0)] - fn contains_storage( - &mut self, - memory: &mut M, - flags: u32, - key_ptr: u32, - key_len: u32, - ) -> Result { - self.contains_storage(memory, flags, key_ptr, key_len) - } - - /// Retrieve and remove the value under the given key from storage. - /// See [`pallet_revive_uapi::HostFn::take_storage`] - #[api_version(0)] - #[mutating] - fn take_storage( - &mut self, - memory: &mut M, - flags: u32, - key_ptr: u32, - key_len: u32, - out_ptr: u32, - out_len_ptr: u32, - ) -> Result { - self.take_storage(memory, flags, key_ptr, key_len, out_ptr, out_len_ptr) - } - /// Make a call to another contract. /// See [`pallet_revive_uapi::HostFn::call`]. - #[api_version(0)] + #[stable] fn call( &mut self, memory: &mut M, @@ -1265,7 +1197,7 @@ pub mod env { /// Execute code in the context (storage, caller, value) of the current contract. /// See [`pallet_revive_uapi::HostFn::delegate_call`]. - #[api_version(0)] + #[stable] fn delegate_call( &mut self, memory: &mut M, @@ -1295,7 +1227,7 @@ pub mod env { /// Instantiate a contract with the specified code hash. /// See [`pallet_revive_uapi::HostFn::instantiate`]. - #[api_version(0)] + #[stable] #[mutating] fn instantiate( &mut self, @@ -1327,17 +1259,25 @@ pub mod env { ) } - /// Remove the calling account and transfer remaining **free** balance. 
- /// See [`pallet_revive_uapi::HostFn::terminate`]. - #[api_version(0)] - #[mutating] - fn terminate(&mut self, memory: &mut M, beneficiary_ptr: u32) -> Result<(), TrapReason> { - self.terminate(memory, beneficiary_ptr) + /// Returns the total size of the contract call input data. + /// See [`pallet_revive_uapi::HostFn::call_data_size `]. + #[stable] + fn call_data_size(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { + self.charge_gas(RuntimeCosts::CallDataSize)?; + let value = + U256::from(self.input_data.as_ref().map(|input| input.len()).unwrap_or_default()); + Ok(self.write_fixed_sandbox_output( + memory, + out_ptr, + &value.to_little_endian(), + false, + already_charged, + )?) } /// Stores the input passed by the caller into the supplied buffer. /// See [`pallet_revive_uapi::HostFn::input`]. - #[api_version(0)] + #[stable] fn input(&mut self, memory: &mut M, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { if let Some(input) = self.input_data.take() { self.write_sandbox_output(memory, out_ptr, out_len_ptr, &input, false, |len| { @@ -1350,9 +1290,40 @@ pub mod env { } } + /// Stores the U256 value at given call input `offset` into the supplied buffer. + /// See [`pallet_revive_uapi::HostFn::call_data_load`]. + #[stable] + fn call_data_load( + &mut self, + memory: &mut M, + out_ptr: u32, + offset: u32, + ) -> Result<(), TrapReason> { + self.charge_gas(RuntimeCosts::CallDataLoad)?; + + let Some(input) = self.input_data.as_ref() else { + return Err(Error::::InputForwarded.into()); + }; + + let mut data = [0; 32]; + let start = offset as usize; + let data = if start >= input.len() { + data // Any index is valid to request; OOB offsets return zero. + } else { + let end = start.saturating_add(32).min(input.len()); + data[..end - start].copy_from_slice(&input[start..end]); + data.reverse(); + data // Solidity expects right-padded data + }; + + self.write_fixed_sandbox_output(memory, out_ptr, &data, false, already_charged)?; + + Ok(()) + } + /// Cease contract execution and save a data buffer as a result of the execution. /// See [`pallet_revive_uapi::HostFn::return_value`]. - #[api_version(0)] + #[stable] fn seal_return( &mut self, memory: &mut M, @@ -1366,7 +1337,7 @@ pub mod env { /// Stores the address of the caller into the supplied buffer. /// See [`pallet_revive_uapi::HostFn::caller`]. - #[api_version(0)] + #[stable] fn caller(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::Caller)?; let caller = ::AddressMapper::to_address(self.ext.caller().account_id()?); @@ -1381,7 +1352,7 @@ pub mod env { /// Stores the address of the call stack origin into the supplied buffer. /// See [`pallet_revive_uapi::HostFn::origin`]. - #[api_version(0)] + #[stable] fn origin(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::Origin)?; let origin = ::AddressMapper::to_address(self.ext.origin().account_id()?); @@ -1394,18 +1365,9 @@ pub mod env { )?) } - /// Checks whether a specified address belongs to a contract. - /// See [`pallet_revive_uapi::HostFn::is_contract`]. - #[api_version(0)] - fn is_contract(&mut self, memory: &mut M, account_ptr: u32) -> Result { - self.charge_gas(RuntimeCosts::IsContract)?; - let address = memory.read_h160(account_ptr)?; - Ok(self.ext.is_contract(&address) as u32) - } - /// Retrieve the code hash for a specified contract address. /// See [`pallet_revive_uapi::HostFn::code_hash`]. 
- #[api_version(0)] + #[stable] fn code_hash(&mut self, memory: &mut M, addr_ptr: u32, out_ptr: u32) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::CodeHash)?; let address = memory.read_h160(addr_ptr)?; @@ -1420,7 +1382,7 @@ pub mod env { /// Retrieve the code size for a given contract address. /// See [`pallet_revive_uapi::HostFn::code_size`]. - #[api_version(0)] + #[stable] fn code_size(&mut self, memory: &mut M, addr_ptr: u32, out_ptr: u32) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::CodeSize)?; let address = memory.read_h160(addr_ptr)?; @@ -1433,40 +1395,9 @@ pub mod env { )?) } - /// Retrieve the code hash of the currently executing contract. - /// See [`pallet_revive_uapi::HostFn::own_code_hash`]. - #[api_version(0)] - fn own_code_hash(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { - self.charge_gas(RuntimeCosts::OwnCodeHash)?; - let code_hash = *self.ext.own_code_hash(); - Ok(self.write_fixed_sandbox_output( - memory, - out_ptr, - code_hash.as_bytes(), - false, - already_charged, - )?) - } - - /// Checks whether the caller of the current contract is the origin of the whole call stack. - /// See [`pallet_revive_uapi::HostFn::caller_is_origin`]. - #[api_version(0)] - fn caller_is_origin(&mut self, _memory: &mut M) -> Result { - self.charge_gas(RuntimeCosts::CallerIsOrigin)?; - Ok(self.ext.caller_is_origin() as u32) - } - - /// Checks whether the caller of the current contract is root. - /// See [`pallet_revive_uapi::HostFn::caller_is_root`]. - #[api_version(0)] - fn caller_is_root(&mut self, _memory: &mut M) -> Result { - self.charge_gas(RuntimeCosts::CallerIsRoot)?; - Ok(self.ext.caller_is_root() as u32) - } - /// Stores the address of the current contract into the supplied buffer. /// See [`pallet_revive_uapi::HostFn::address`]. - #[api_version(0)] + #[stable] fn address(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::Address)?; let address = self.ext.address(); @@ -1481,7 +1412,7 @@ pub mod env { /// Stores the price for the specified amount of weight into the supplied buffer. /// See [`pallet_revive_uapi::HostFn::weight_to_fee`]. - #[api_version(0)] + #[stable] fn weight_to_fee( &mut self, memory: &mut M, @@ -1500,30 +1431,9 @@ pub mod env { )?) } - /// Stores the amount of weight left into the supplied buffer. - /// See [`pallet_revive_uapi::HostFn::weight_left`]. - #[api_version(0)] - fn weight_left( - &mut self, - memory: &mut M, - out_ptr: u32, - out_len_ptr: u32, - ) -> Result<(), TrapReason> { - self.charge_gas(RuntimeCosts::WeightLeft)?; - let gas_left = &self.ext.gas_meter().gas_left().encode(); - Ok(self.write_sandbox_output( - memory, - out_ptr, - out_len_ptr, - gas_left, - false, - already_charged, - )?) - } - /// Stores the immutable data into the supplied buffer. /// See [`pallet_revive_uapi::HostFn::get_immutable_data`]. - #[api_version(0)] + #[stable] fn get_immutable_data( &mut self, memory: &mut M, @@ -1539,7 +1449,7 @@ pub mod env { /// Attaches the supplied immutable data to the currently executing contract. /// See [`pallet_revive_uapi::HostFn::set_immutable_data`]. - #[api_version(0)] + #[stable] fn set_immutable_data(&mut self, memory: &mut M, ptr: u32, len: u32) -> Result<(), TrapReason> { if len > limits::IMMUTABLE_BYTES { return Err(Error::::OutOfBounds.into()); @@ -1553,7 +1463,7 @@ pub mod env { /// Stores the *free* balance of the current account into the supplied buffer. /// See [`pallet_revive_uapi::HostFn::balance`]. 
- #[api_version(0)] + #[stable] fn balance(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::Balance)?; Ok(self.write_fixed_sandbox_output( @@ -1567,7 +1477,7 @@ pub mod env { /// Stores the *free* balance of the supplied address into the supplied buffer. /// See [`pallet_revive_uapi::HostFn::balance`]. - #[api_version(0)] + #[stable] fn balance_of( &mut self, memory: &mut M, @@ -1587,7 +1497,7 @@ pub mod env { /// Returns the chain ID. /// See [`pallet_revive_uapi::HostFn::chain_id`]. - #[api_version(0)] + #[stable] fn chain_id(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { Ok(self.write_fixed_sandbox_output( memory, @@ -1600,7 +1510,7 @@ pub mod env { /// Stores the value transferred along with this call/instantiate into the supplied buffer. /// See [`pallet_revive_uapi::HostFn::value_transferred`]. - #[api_version(0)] + #[stable] fn value_transferred(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::ValueTransferred)?; Ok(self.write_fixed_sandbox_output( @@ -1614,7 +1524,7 @@ pub mod env { /// Load the latest block timestamp into the supplied buffer /// See [`pallet_revive_uapi::HostFn::now`]. - #[api_version(0)] + #[stable] fn now(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::Now)?; Ok(self.write_fixed_sandbox_output( @@ -1626,23 +1536,9 @@ pub mod env { )?) } - /// Stores the minimum balance (a.k.a. existential deposit) into the supplied buffer. - /// See [`pallet_revive_uapi::HostFn::minimum_balance`]. - #[api_version(0)] - fn minimum_balance(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { - self.charge_gas(RuntimeCosts::MinimumBalance)?; - Ok(self.write_fixed_sandbox_output( - memory, - out_ptr, - &self.ext.minimum_balance().to_little_endian(), - false, - already_charged, - )?) - } - /// Deposit a contract event with the data buffer and optional list of topics. /// See [pallet_revive_uapi::HostFn::deposit_event] - #[api_version(0)] + #[stable] #[mutating] fn deposit_event( &mut self, @@ -1682,7 +1578,7 @@ pub mod env { /// Stores the current block number of the current contract into the supplied buffer. /// See [`pallet_revive_uapi::HostFn::block_number`]. - #[api_version(0)] + #[stable] fn block_number(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::BlockNumber)?; Ok(self.write_fixed_sandbox_output( @@ -1696,7 +1592,7 @@ pub mod env { /// Stores the block hash at given block height into the supplied buffer. /// See [`pallet_revive_uapi::HostFn::block_hash`]. - #[api_version(0)] + #[stable] fn block_hash( &mut self, memory: &mut M, @@ -1715,25 +1611,9 @@ pub mod env { )?) } - /// Computes the SHA2 256-bit hash on the given input buffer. - /// See [`pallet_revive_uapi::HostFn::hash_sha2_256`]. - #[api_version(0)] - fn hash_sha2_256( - &mut self, - memory: &mut M, - input_ptr: u32, - input_len: u32, - output_ptr: u32, - ) -> Result<(), TrapReason> { - self.charge_gas(RuntimeCosts::HashSha256(input_len))?; - Ok(self.compute_hash_on_intermediate_buffer( - memory, sha2_256, input_ptr, input_len, output_ptr, - )?) - } - /// Computes the KECCAK 256-bit hash on the given input buffer. /// See [`pallet_revive_uapi::HostFn::hash_keccak_256`]. - #[api_version(0)] + #[stable] fn hash_keccak_256( &mut self, memory: &mut M, @@ -1747,36 +1627,44 @@ pub mod env { )?) } - /// Computes the BLAKE2 256-bit hash on the given input buffer. 
- /// See [`pallet_revive_uapi::HostFn::hash_blake2_256`]. - #[api_version(0)] - fn hash_blake2_256( - &mut self, - memory: &mut M, - input_ptr: u32, - input_len: u32, - output_ptr: u32, - ) -> Result<(), TrapReason> { - self.charge_gas(RuntimeCosts::HashBlake256(input_len))?; - Ok(self.compute_hash_on_intermediate_buffer( - memory, blake2_256, input_ptr, input_len, output_ptr, + /// Stores the length of the data returned by the last call into the supplied buffer. + /// See [`pallet_revive_uapi::HostFn::return_data_size`]. + #[stable] + fn return_data_size(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { + Ok(self.write_fixed_sandbox_output( + memory, + out_ptr, + &U256::from(self.ext.last_frame_output().data.len()).to_little_endian(), + false, + |len| Some(RuntimeCosts::CopyToContract(len)), )?) } - /// Computes the BLAKE2 128-bit hash on the given input buffer. - /// See [`pallet_revive_uapi::HostFn::hash_blake2_128`]. - #[api_version(0)] - fn hash_blake2_128( + /// Stores data returned by the last call, starting from `offset`, into the supplied buffer. + /// See [`pallet_revive_uapi::HostFn::return_data`]. + #[stable] + fn return_data_copy( &mut self, memory: &mut M, - input_ptr: u32, - input_len: u32, - output_ptr: u32, + out_ptr: u32, + out_len_ptr: u32, + offset: u32, ) -> Result<(), TrapReason> { - self.charge_gas(RuntimeCosts::HashBlake128(input_len))?; - Ok(self.compute_hash_on_intermediate_buffer( - memory, blake2_128, input_ptr, input_len, output_ptr, - )?) + let output = mem::take(self.ext.last_frame_output_mut()); + let result = if offset as usize > output.data.len() { + Err(Error::::OutOfBounds.into()) + } else { + self.write_sandbox_output( + memory, + out_ptr, + out_len_ptr, + &output.data[offset as usize..], + false, + |len| Some(RuntimeCosts::CopyToContract(len)), + ) + }; + *self.ext.last_frame_output_mut() = output; + Ok(result?) } /// Call into the chain extension provided by the chain if any. @@ -1809,28 +1697,6 @@ pub mod env { ret } - /// Emit a custom debug message. - /// See [`pallet_revive_uapi::HostFn::debug_message`]. - #[api_version(0)] - fn debug_message( - &mut self, - memory: &mut M, - str_ptr: u32, - str_len: u32, - ) -> Result { - let str_len = str_len.min(limits::DEBUG_BUFFER_BYTES); - self.charge_gas(RuntimeCosts::DebugMessage(str_len))?; - if self.ext.append_debug_buffer("") { - let data = memory.read(str_ptr, str_len)?; - if let Some(msg) = core::str::from_utf8(&data).ok() { - self.ext.append_debug_buffer(msg); - } - Ok(ReturnErrorCode::Success) - } else { - Ok(ReturnErrorCode::LoggingDisabled) - } - } - /// Call some dispatchable of the runtime. /// See [`frame_support::traits::call_runtime`]. #[mutating] @@ -1850,86 +1716,68 @@ pub mod env { ) } - /// Execute an XCM program locally, using the contract's address as the origin. - /// See [`pallet_revive_uapi::HostFn::execute_xcm`]. 
- #[mutating] - fn xcm_execute( - &mut self, - memory: &mut M, - msg_ptr: u32, - msg_len: u32, - ) -> Result { - use frame_support::dispatch::DispatchInfo; - use xcm::VersionedXcm; - use xcm_builder::{ExecuteController, ExecuteControllerWeightInfo}; - - self.charge_gas(RuntimeCosts::CopyFromContract(msg_len))?; - let message: VersionedXcm> = memory.read_as_unbounded(msg_ptr, msg_len)?; - - let execute_weight = - <::Xcm as ExecuteController<_, _>>::WeightInfo::execute(); - let weight = self.ext.gas_meter().gas_left().max(execute_weight); - let dispatch_info = DispatchInfo { call_weight: weight, ..Default::default() }; - - self.call_dispatchable::( - dispatch_info, - RuntimeCosts::CallXcmExecute, - |runtime| { - let origin = crate::RawOrigin::Signed(runtime.ext.account_id().clone()).into(); - let weight_used = <::Xcm>::execute( - origin, - Box::new(message), - weight.saturating_sub(execute_weight), - )?; + /// Checks whether the caller of the current contract is the origin of the whole call stack. + /// See [`pallet_revive_uapi::HostFn::caller_is_origin`]. + fn caller_is_origin(&mut self, _memory: &mut M) -> Result { + self.charge_gas(RuntimeCosts::CallerIsOrigin)?; + Ok(self.ext.caller_is_origin() as u32) + } - Ok(Some(weight_used.saturating_add(execute_weight)).into()) - }, - ) + /// Checks whether the caller of the current contract is root. + /// See [`pallet_revive_uapi::HostFn::caller_is_root`]. + fn caller_is_root(&mut self, _memory: &mut M) -> Result { + self.charge_gas(RuntimeCosts::CallerIsRoot)?; + Ok(self.ext.caller_is_root() as u32) } - /// Send an XCM program from the contract to the specified destination. - /// See [`pallet_revive_uapi::HostFn::send_xcm`]. + /// Clear the value at the given key in the contract storage. + /// See [`pallet_revive_uapi::HostFn::clear_storage`] #[mutating] - fn xcm_send( + fn clear_storage( &mut self, memory: &mut M, - dest_ptr: u32, - dest_len: u32, - msg_ptr: u32, - msg_len: u32, - output_ptr: u32, - ) -> Result { - use xcm::{VersionedLocation, VersionedXcm}; - use xcm_builder::{SendController, SendControllerWeightInfo}; - - self.charge_gas(RuntimeCosts::CopyFromContract(dest_len))?; - let dest: VersionedLocation = memory.read_as_unbounded(dest_ptr, dest_len)?; - - self.charge_gas(RuntimeCosts::CopyFromContract(msg_len))?; - let message: VersionedXcm<()> = memory.read_as_unbounded(msg_ptr, msg_len)?; + flags: u32, + key_ptr: u32, + key_len: u32, + ) -> Result { + self.clear_storage(memory, flags, key_ptr, key_len) + } - let weight = <::Xcm as SendController<_>>::WeightInfo::send(); - self.charge_gas(RuntimeCosts::CallRuntime(weight))?; - let origin = crate::RawOrigin::Signed(self.ext.account_id().clone()).into(); + /// Checks whether there is a value stored under the given key. + /// See [`pallet_revive_uapi::HostFn::contains_storage`] + fn contains_storage( + &mut self, + memory: &mut M, + flags: u32, + key_ptr: u32, + key_len: u32, + ) -> Result { + self.contains_storage(memory, flags, key_ptr, key_len) + } - match <::Xcm>::send(origin, dest.into(), message.into()) { - Ok(message_id) => { - memory.write(output_ptr, &message_id.encode())?; - Ok(ReturnErrorCode::Success) - }, - Err(e) => { - if self.ext.append_debug_buffer("") { - self.ext.append_debug_buffer("seal0::xcm_send failed with: "); - self.ext.append_debug_buffer(e.into()); - }; - Ok(ReturnErrorCode::XcmSendFailed) - }, + /// Emit a custom debug message. + /// See [`pallet_revive_uapi::HostFn::debug_message`]. 
+ /// Emit a custom debug message.
+ /// See [`pallet_revive_uapi::HostFn::debug_message`].
+ fn debug_message(
+ &mut self,
+ memory: &mut M,
+ str_ptr: u32,
+ str_len: u32,
+ ) -> Result<ReturnErrorCode, TrapReason> {
+ let str_len = str_len.min(limits::DEBUG_BUFFER_BYTES);
+ self.charge_gas(RuntimeCosts::DebugMessage(str_len))?;
+ if self.ext.append_debug_buffer("") {
+ let data = memory.read(str_ptr, str_len)?;
+ if let Some(msg) = core::str::from_utf8(&data).ok() {
+ self.ext.append_debug_buffer(msg);
+ }
+ Ok(ReturnErrorCode::Success)
+ } else {
+ Ok(ReturnErrorCode::LoggingDisabled)
 }
 }

 /// Recovers the ECDSA public key from the given message hash and signature.
 /// See [`pallet_revive_uapi::HostFn::ecdsa_recover`].
- #[api_version(0)]
 fn ecdsa_recover(
 &mut self,
 memory: &mut M,
@@ -1958,59 +1806,8 @@ pub mod env {
 }
 }

- /// Verify a sr25519 signature
- /// See [`pallet_revive_uapi::HostFn::sr25519_verify`].
- #[api_version(0)]
- fn sr25519_verify(
- &mut self,
- memory: &mut M,
- signature_ptr: u32,
- pub_key_ptr: u32,
- message_len: u32,
- message_ptr: u32,
- ) -> Result<ReturnErrorCode, TrapReason> {
- self.charge_gas(RuntimeCosts::Sr25519Verify(message_len))?;
-
- let mut signature: [u8; 64] = [0; 64];
- memory.read_into_buf(signature_ptr, &mut signature)?;
-
- let mut pub_key: [u8; 32] = [0; 32];
- memory.read_into_buf(pub_key_ptr, &mut pub_key)?;
-
- let message: Vec<u8> = memory.read(message_ptr, message_len)?;
-
- if self.ext.sr25519_verify(&signature, &message, &pub_key) {
- Ok(ReturnErrorCode::Success)
- } else {
- Ok(ReturnErrorCode::Sr25519VerifyFailed)
- }
- }
-
- /// Replace the contract code at the specified address with new code.
- /// See [`pallet_revive_uapi::HostFn::set_code_hash`].
- ///
- /// Disabled until the internal implementation takes care of collecting
- /// the immutable data of the new code hash.
- #[mutating]
- fn set_code_hash(
- &mut self,
- memory: &mut M,
- code_hash_ptr: u32,
- ) -> Result<ReturnErrorCode, TrapReason> {
- self.charge_gas(RuntimeCosts::SetCodeHash)?;
- let code_hash: H256 = memory.read_h256(code_hash_ptr)?;
- match self.ext.set_code_hash(code_hash) {
- Err(err) => {
- let code = Self::err_into_return_code(err)?;
- Ok(code)
- },
- Ok(()) => Ok(ReturnErrorCode::Success),
- }
- }
-
 /// Calculates Ethereum address from the ECDSA compressed public key and stores
 /// See [`pallet_revive_uapi::HostFn::ecdsa_to_eth_address`].
- #[api_version(0)]
 fn ecdsa_to_eth_address(
 &mut self,
 memory: &mut M,
@@ -2030,9 +1827,61 @@ pub mod env {
 }
 }

+ /// Computes the BLAKE2 128-bit hash on the given input buffer.
+ /// See [`pallet_revive_uapi::HostFn::hash_blake2_128`].
+ fn hash_blake2_128(
+ &mut self,
+ memory: &mut M,
+ input_ptr: u32,
+ input_len: u32,
+ output_ptr: u32,
+ ) -> Result<(), TrapReason> {
+ self.charge_gas(RuntimeCosts::HashBlake128(input_len))?;
+ Ok(self.compute_hash_on_intermediate_buffer(
+ memory, blake2_128, input_ptr, input_len, output_ptr,
+ )?)
+ }
+
+ /// Computes the BLAKE2 256-bit hash on the given input buffer.
+ /// See [`pallet_revive_uapi::HostFn::hash_blake2_256`].
+ fn hash_blake2_256(
+ &mut self,
+ memory: &mut M,
+ input_ptr: u32,
+ input_len: u32,
+ output_ptr: u32,
+ ) -> Result<(), TrapReason> {
+ self.charge_gas(RuntimeCosts::HashBlake256(input_len))?;
+ Ok(self.compute_hash_on_intermediate_buffer(
+ memory, blake2_256, input_ptr, input_len, output_ptr,
+ )?)
+ }
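Both BLAKE2 variants above (and `hash_sha2_256` below) share one helper, `compute_hash_on_intermediate_buffer`: gas is charged as a function of `input_len`, the input is read out of sandbox memory into a host-side buffer, and the fixed-size digest is written back to `output_ptr`. A generic sketch of that shape, with stand-in types rather than the pallet's own:

    // Read `input_len` bytes starting at `input_ptr` out of a flat memory
    // image and hash them; a real host fn would trap on out-of-bounds reads.
    fn compute_hash<F, R>(memory: &[u8], hash_fn: F, input_ptr: u32, input_len: u32) -> Option<R>
    where
        F: FnOnce(&[u8]) -> R,
    {
        let start = input_ptr as usize;
        let end = start.checked_add(input_len as usize)?;
        Some(hash_fn(memory.get(start..end)?))
    }

    fn main() {
        // A toy 1-byte "digest" standing in for a real 16/32-byte hash output.
        let xor_digest = |data: &[u8]| data.iter().fold(0u8, |acc, b| acc ^ b);
        let memory = vec![1u8, 2, 3, 4, 5];
        assert_eq!(compute_hash(&memory, xor_digest, 1, 3), Some(2 ^ 3 ^ 4));
    }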
+ /// Computes the SHA2 256-bit hash on the given input buffer.
+ /// See [`pallet_revive_uapi::HostFn::hash_sha2_256`].
+ fn hash_sha2_256(
+ &mut self,
+ memory: &mut M,
+ input_ptr: u32,
+ input_len: u32,
+ output_ptr: u32,
+ ) -> Result<(), TrapReason> {
+ self.charge_gas(RuntimeCosts::HashSha256(input_len))?;
+ Ok(self.compute_hash_on_intermediate_buffer(
+ memory, sha2_256, input_ptr, input_len, output_ptr,
+ )?)
+ }
+
+ /// Checks whether a specified address belongs to a contract.
+ /// See [`pallet_revive_uapi::HostFn::is_contract`].
+ fn is_contract(&mut self, memory: &mut M, account_ptr: u32) -> Result<u32, TrapReason> {
+ self.charge_gas(RuntimeCosts::IsContract)?;
+ let address = memory.read_h160(account_ptr)?;
+ Ok(self.ext.is_contract(&address) as u32)
+ }
+
 /// Adds a new delegate dependency to the contract.
 /// See [`pallet_revive_uapi::HostFn::lock_delegate_dependency`].
- #[api_version(0)]
 #[mutating]
 fn lock_delegate_dependency(
 &mut self,
@@ -2045,9 +1894,75 @@ pub mod env {
 Ok(())
 }

+ /// Stores the minimum balance (a.k.a. existential deposit) into the supplied buffer.
+ /// See [`pallet_revive_uapi::HostFn::minimum_balance`].
+ fn minimum_balance(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> {
+ self.charge_gas(RuntimeCosts::MinimumBalance)?;
+ Ok(self.write_fixed_sandbox_output(
+ memory,
+ out_ptr,
+ &self.ext.minimum_balance().to_little_endian(),
+ false,
+ already_charged,
+ )?)
+ }
+
+ /// Retrieve the code hash of the currently executing contract.
+ /// See [`pallet_revive_uapi::HostFn::own_code_hash`].
+ fn own_code_hash(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> {
+ self.charge_gas(RuntimeCosts::OwnCodeHash)?;
+ let code_hash = *self.ext.own_code_hash();
+ Ok(self.write_fixed_sandbox_output(
+ memory,
+ out_ptr,
+ code_hash.as_bytes(),
+ false,
+ already_charged,
+ )?)
+ }
+
+ /// Replace the contract code at the specified address with new code.
+ /// See [`pallet_revive_uapi::HostFn::set_code_hash`].
+ ///
+ /// Disabled until the internal implementation takes care of collecting
+ /// the immutable data of the new code hash.
+ #[mutating]
+ fn set_code_hash(&mut self, memory: &mut M, code_hash_ptr: u32) -> Result<(), TrapReason> {
+ self.charge_gas(RuntimeCosts::SetCodeHash)?;
+ let code_hash: H256 = memory.read_h256(code_hash_ptr)?;
+ self.ext.set_code_hash(code_hash)?;
+ Ok(())
+ }
+
+ /// Verify a sr25519 signature
+ /// See [`pallet_revive_uapi::HostFn::sr25519_verify`].
+ fn sr25519_verify(
+ &mut self,
+ memory: &mut M,
+ signature_ptr: u32,
+ pub_key_ptr: u32,
+ message_len: u32,
+ message_ptr: u32,
+ ) -> Result<ReturnErrorCode, TrapReason> {
+ self.charge_gas(RuntimeCosts::Sr25519Verify(message_len))?;
+
+ let mut signature: [u8; 64] = [0; 64];
+ memory.read_into_buf(signature_ptr, &mut signature)?;
+
+ let mut pub_key: [u8; 32] = [0; 32];
+ memory.read_into_buf(pub_key_ptr, &mut pub_key)?;
+
+ let message: Vec<u8> = memory.read(message_ptr, message_len)?;
+
+ if self.ext.sr25519_verify(&signature, &message, &pub_key) {
+ Ok(ReturnErrorCode::Success)
+ } else {
+ Ok(ReturnErrorCode::Sr25519VerifyFailed)
+ }
+ }
+
 /// Removes the delegate dependency from the contract.
 /// see [`pallet_revive_uapi::HostFn::unlock_delegate_dependency`].
- #[api_version(0)]
 #[mutating]
 fn unlock_delegate_dependency(
 &mut self,
@@ -2060,43 +1975,122 @@ pub mod env {
 Ok(())
 }
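`sr25519_verify` above reads its signature and public key into fixed 64- and 32-byte arrays via `read_into_buf`, while the message is a variable-length read whose size drives the gas charge. A small sketch of that fixed-size read pattern against a toy memory type (`Memory` here is a stand-in for the pallet's sandbox memory trait, not its real definition):

    struct Memory(Vec<u8>);

    impl Memory {
        // Copy exactly `buf.len()` bytes starting at `ptr`, or fail like a
        // trapped out-of-bounds access would.
        fn read_into_buf(&self, ptr: u32, buf: &mut [u8]) -> Result<(), &'static str> {
            let start = ptr as usize;
            let end = start.checked_add(buf.len()).ok_or("OutOfBounds")?;
            buf.copy_from_slice(self.0.get(start..end).ok_or("OutOfBounds")?);
            Ok(())
        }
    }

    fn main() {
        let mem = Memory(vec![7u8; 128]);
        let mut signature = [0u8; 64];
        let mut pub_key = [0u8; 32];
        mem.read_into_buf(0, &mut signature).unwrap();
        mem.read_into_buf(64, &mut pub_key).unwrap();
        assert!(mem.read_into_buf(100, &mut signature).is_err()); // 100 + 64 > 128
    }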
- /// Stores the length of the data returned by the last call into the supplied buffer.
- /// See [`pallet_revive_uapi::HostFn::return_data_size`].
- #[api_version(0)]
- fn return_data_size(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> {
- Ok(self.write_fixed_sandbox_output(
+ /// Retrieve and remove the value under the given key from storage.
+ /// See [`pallet_revive_uapi::HostFn::take_storage`]
+ #[mutating]
+ fn take_storage(
+ &mut self,
+ memory: &mut M,
+ flags: u32,
+ key_ptr: u32,
+ key_len: u32,
+ out_ptr: u32,
+ out_len_ptr: u32,
+ ) -> Result<ReturnErrorCode, TrapReason> {
+ self.take_storage(memory, flags, key_ptr, key_len, out_ptr, out_len_ptr)
+ }
+
+ /// Remove the calling account and transfer remaining **free** balance.
+ /// See [`pallet_revive_uapi::HostFn::terminate`].
+ #[mutating]
+ fn terminate(&mut self, memory: &mut M, beneficiary_ptr: u32) -> Result<(), TrapReason> {
+ self.terminate(memory, beneficiary_ptr)
+ }
+
+ /// Stores the amount of weight left into the supplied buffer.
+ /// See [`pallet_revive_uapi::HostFn::weight_left`].
+ fn weight_left(
+ &mut self,
+ memory: &mut M,
+ out_ptr: u32,
+ out_len_ptr: u32,
+ ) -> Result<(), TrapReason> {
+ self.charge_gas(RuntimeCosts::WeightLeft)?;
+ let gas_left = &self.ext.gas_meter().gas_left().encode();
+ Ok(self.write_sandbox_output(
 memory,
 out_ptr,
- &U256::from(self.ext.last_frame_output().data.len()).to_little_endian(),
+ out_len_ptr,
+ gas_left,
 false,
- |len| Some(RuntimeCosts::CopyToContract(len)),
+ already_charged,
 )?)
 }

- /// Stores data returned by the last call, starting from `offset`, into the supplied buffer.
- /// See [`pallet_revive_uapi::HostFn::return_data`].
- #[api_version(0)]
- fn return_data_copy(
+ /// Execute an XCM program locally, using the contract's address as the origin.
+ /// See [`pallet_revive_uapi::HostFn::execute_xcm`].
+ #[mutating]
+ fn xcm_execute(
 &mut self,
 memory: &mut M,
- out_ptr: u32,
- out_len_ptr: u32,
- offset: u32,
- ) -> Result<(), TrapReason> {
- let output = mem::take(self.ext.last_frame_output_mut());
- let result = if offset as usize > output.data.len() {
- Err(Error::<E::T>::OutOfBounds.into())
- } else {
- self.write_sandbox_output(
- memory,
- out_ptr,
- out_len_ptr,
- &output.data[offset as usize..],
- false,
- |len| Some(RuntimeCosts::CopyToContract(len)),
- )
- };
- *self.ext.last_frame_output_mut() = output;
- Ok(result?)
+ msg_ptr: u32,
+ msg_len: u32,
+ ) -> Result<ReturnErrorCode, TrapReason> {
+ use frame_support::dispatch::DispatchInfo;
+ use xcm::VersionedXcm;
+ use xcm_builder::{ExecuteController, ExecuteControllerWeightInfo};
+
+ self.charge_gas(RuntimeCosts::CopyFromContract(msg_len))?;
+ let message: VersionedXcm<CallOf<E::T>> = memory.read_as_unbounded(msg_ptr, msg_len)?;
+
+ let execute_weight =
+ <<E::T as Config>::Xcm as ExecuteController<_, _>>::WeightInfo::execute();
+ let weight = self.ext.gas_meter().gas_left().max(execute_weight);
+ let dispatch_info = DispatchInfo { call_weight: weight, ..Default::default() };
+
+ self.call_dispatchable::<XcmExecutionFailed>(
+ dispatch_info,
+ RuntimeCosts::CallXcmExecute,
+ |runtime| {
+ let origin = crate::RawOrigin::Signed(runtime.ext.account_id().clone()).into();
+ let weight_used = <<E::T as Config>::Xcm>::execute(
+ origin,
+ Box::new(message),
+ weight.saturating_sub(execute_weight),
+ )?;
+
+ Ok(Some(weight_used.saturating_add(execute_weight)).into())
+ },
+ )
+ }
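The weight juggling in `xcm_execute` above is easy to misread: the dispatchable is pre-charged with everything that is left (but at least the fixed `execute()` overhead), the XCM message runs against that budget minus the overhead, and the closure reports actual usage plus the overhead so the surplus is refunded. The same arithmetic on bare `u64`s (a sketch, not the pallet's `Weight` type):

    // budget = pre-charged amount; for_xcm = what the executor may consume;
    // the returned value is what the frame ends up paying.
    fn xcm_execute_weight(gas_left: u64, execute_overhead: u64, xcm_used: u64) -> u64 {
        let budget = gas_left.max(execute_overhead);
        let for_xcm = budget.saturating_sub(execute_overhead);
        let used = xcm_used.min(for_xcm); // the executor cannot exceed its budget
        used.saturating_add(execute_overhead)
    }

    fn main() {
        // With 1_000 gas left and a fixed overhead of 100, an XCM program
        // that uses 250 ends up costing 350 in total.
        assert_eq!(xcm_execute_weight(1_000, 100, 250), 350);
    }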
+
+ /// Send an XCM program from the contract to the specified destination.
+ /// See [`pallet_revive_uapi::HostFn::send_xcm`].
+ #[mutating]
+ fn xcm_send(
+ &mut self,
+ memory: &mut M,
+ dest_ptr: u32,
+ dest_len: u32,
+ msg_ptr: u32,
+ msg_len: u32,
+ output_ptr: u32,
+ ) -> Result<ReturnErrorCode, TrapReason> {
+ use xcm::{VersionedLocation, VersionedXcm};
+ use xcm_builder::{SendController, SendControllerWeightInfo};
+
+ self.charge_gas(RuntimeCosts::CopyFromContract(dest_len))?;
+ let dest: VersionedLocation = memory.read_as_unbounded(dest_ptr, dest_len)?;
+
+ self.charge_gas(RuntimeCosts::CopyFromContract(msg_len))?;
+ let message: VersionedXcm<()> = memory.read_as_unbounded(msg_ptr, msg_len)?;
+
+ let weight = <<E::T as Config>::Xcm as SendController<_>>::WeightInfo::send();
+ self.charge_gas(RuntimeCosts::CallRuntime(weight))?;
+ let origin = crate::RawOrigin::Signed(self.ext.account_id().clone()).into();
+
+ match <<E::T as Config>::Xcm>::send(origin, dest.into(), message.into()) {
+ Ok(message_id) => {
+ memory.write(output_ptr, &message_id.encode())?;
+ Ok(ReturnErrorCode::Success)
+ },
+ Err(e) => {
+ if self.ext.append_debug_buffer("") {
+ self.ext.append_debug_buffer("seal0::xcm_send failed with: ");
+ self.ext.append_debug_buffer(e.into());
+ };
+ Ok(ReturnErrorCode::XcmSendFailed)
+ },
+ }
 }
}

diff --git a/substrate/frame/revive/src/weights.rs b/substrate/frame/revive/src/weights.rs
index 96654432a5cc..e8fec31b19e5 100644
--- a/substrate/frame/revive/src/weights.rs
+++ b/substrate/frame/revive/src/weights.rs
@@ -18,28 +18,28 @@
 //! Autogenerated weights for `pallet_revive`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-12-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `a0d5968554fc`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024`

 // Executed Command:
-// ./target/production/substrate-node
+// target/production/substrate-node
 // benchmark
 // pallet
+// --extrinsic=*
 // --chain=dev
+// --pallet=pallet_revive
+// --header=/__w/polkadot-sdk/polkadot-sdk/substrate/HEADER-APACHE2
+// --output=/__w/polkadot-sdk/polkadot-sdk/substrate/frame/revive/src/weights.rs
+// --wasm-execution=compiled
 // --steps=50
 // --repeat=20
-// --pallet=pallet_revive
+// --heap-pages=4096
+// --template=substrate/.maintain/frame-weight-template.hbs
 // --no-storage-info
-// --no-median-slopes
 // --no-min-squares
-// --extrinsic=*
-// --wasm-execution=compiled
-// --heap-pages=4096
-// --output=./substrate/frame/revive/src/weights.rs
-// --header=./substrate/HEADER-APACHE2
-// --template=./substrate/.maintain/frame-weight-template.hbs
+// --no-median-slopes

 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -80,10 +80,12 @@ pub trait WeightInfo {
 fn seal_set_immutable_data(n: u32, ) -> Weight;
 fn seal_value_transferred() -> Weight;
 fn seal_minimum_balance() -> Weight;
+ fn seal_call_data_size() -> Weight;
 fn seal_block_number() -> Weight;
 fn seal_block_hash() -> Weight;
 fn seal_now() -> Weight;
 fn seal_weight_to_fee() -> Weight;
+ fn seal_call_data_load() -> Weight;
 fn seal_input(n: u32, ) -> Weight;
 fn seal_return(n: u32, ) -> Weight;
 fn seal_terminate(n: u32, ) -> Weight;
@@ -133,8 +135,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 // Proof Size summary in bytes:
 // Measured: `109`
 // Estimated: `1594`
- // Minimum execution time: 2_818_000 picoseconds.
- Weight::from_parts(3_058_000, 1594) + // Minimum execution time: 2_921_000 picoseconds. + Weight::from_parts(3_048_000, 1594) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -144,10 +146,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `425 + k * (69 ±0)` // Estimated: `415 + k * (70 ±0)` - // Minimum execution time: 15_916_000 picoseconds. - Weight::from_parts(16_132_000, 415) - // Standard Error: 1_482 - .saturating_add(Weight::from_parts(1_185_583, 0).saturating_mul(k.into())) + // Minimum execution time: 16_060_000 picoseconds. + Weight::from_parts(3_234_033, 415) + // Standard Error: 1_160 + .saturating_add(Weight::from_parts(1_184_188, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -169,10 +171,10 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 262144]`. fn call_with_code_per_byte(_c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1502` - // Estimated: `7442` - // Minimum execution time: 88_115_000 picoseconds. - Weight::from_parts(92_075_651, 7442) + // Measured: `1536` + // Estimated: `7476` + // Minimum execution time: 93_624_000 picoseconds. + Weight::from_parts(98_332_129, 7476) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -194,14 +196,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `i` is `[0, 262144]`. fn instantiate_with_code(c: u32, i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `403` - // Estimated: `6326` - // Minimum execution time: 188_274_000 picoseconds. - Weight::from_parts(157_773_869, 6326) - // Standard Error: 11 - .saturating_add(Weight::from_parts(16, 0).saturating_mul(c.into())) - // Standard Error: 11 - .saturating_add(Weight::from_parts(4_464, 0).saturating_mul(i.into())) + // Measured: `416` + // Estimated: `6345` + // Minimum execution time: 196_202_000 picoseconds. + Weight::from_parts(169_823_092, 6345) + // Standard Error: 10 + .saturating_add(Weight::from_parts(30, 0).saturating_mul(c.into())) + // Standard Error: 10 + .saturating_add(Weight::from_parts(4_487, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -223,11 +225,11 @@ impl WeightInfo for SubstrateWeight { fn instantiate(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `1296` - // Estimated: `4739` - // Minimum execution time: 158_616_000 picoseconds. - Weight::from_parts(134_329_076, 4739) - // Standard Error: 15 - .saturating_add(Weight::from_parts(4_358, 0).saturating_mul(i.into())) + // Estimated: `4753` + // Minimum execution time: 162_423_000 picoseconds. + Weight::from_parts(144_467_590, 4753) + // Standard Error: 16 + .saturating_add(Weight::from_parts(4_405, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -245,10 +247,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) fn call() -> Weight { // Proof Size summary in bytes: - // Measured: `1502` - // Estimated: `7442` - // Minimum execution time: 134_935_000 picoseconds. 
- Weight::from_parts(141_040_000, 7442) + // Measured: `1536` + // Estimated: `7476` + // Minimum execution time: 144_454_000 picoseconds. + Weight::from_parts(151_756_000, 7476) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -263,8 +265,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `109` // Estimated: `3574` - // Minimum execution time: 51_026_000 picoseconds. - Weight::from_parts(53_309_143, 3574) + // Minimum execution time: 50_712_000 picoseconds. + Weight::from_parts(52_831_382, 3574) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -278,8 +280,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `285` // Estimated: `3750` - // Minimum execution time: 44_338_000 picoseconds. - Weight::from_parts(45_398_000, 3750) + // Minimum execution time: 44_441_000 picoseconds. + Weight::from_parts(46_242_000, 3750) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -291,8 +293,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `529` // Estimated: `6469` - // Minimum execution time: 26_420_000 picoseconds. - Weight::from_parts(27_141_000, 6469) + // Minimum execution time: 27_157_000 picoseconds. + Weight::from_parts(28_182_000, 6469) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -304,8 +306,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `109` // Estimated: `3574` - // Minimum execution time: 39_735_000 picoseconds. - Weight::from_parts(41_260_000, 3574) + // Minimum execution time: 40_588_000 picoseconds. + Weight::from_parts(41_125_000, 3574) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -317,8 +319,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `56` // Estimated: `3521` - // Minimum execution time: 32_059_000 picoseconds. - Weight::from_parts(32_776_000, 3521) + // Minimum execution time: 31_849_000 picoseconds. + Weight::from_parts(32_674_000, 3521) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -330,8 +332,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 13_553_000 picoseconds. - Weight::from_parts(14_121_000, 3610) + // Minimum execution time: 14_510_000 picoseconds. + Weight::from_parts(14_986_000, 3610) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// The range of component `r` is `[0, 1600]`. @@ -339,24 +341,24 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_392_000 picoseconds. - Weight::from_parts(7_692_248, 0) - // Standard Error: 105 - .saturating_add(Weight::from_parts(180_036, 0).saturating_mul(r.into())) + // Minimum execution time: 7_324_000 picoseconds. + Weight::from_parts(8_363_388, 0) + // Standard Error: 230 + .saturating_add(Weight::from_parts(170_510, 0).saturating_mul(r.into())) } fn seal_caller() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 287_000 picoseconds. - Weight::from_parts(317_000, 0) + // Minimum execution time: 275_000 picoseconds. 
+ Weight::from_parts(326_000, 0) } fn seal_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 235_000 picoseconds. - Weight::from_parts(288_000, 0) + // Minimum execution time: 263_000 picoseconds. + Weight::from_parts(292_000, 0) } /// Storage: `Revive::ContractInfoOf` (r:1 w:0) /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) @@ -364,8 +366,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `306` // Estimated: `3771` - // Minimum execution time: 10_101_000 picoseconds. - Weight::from_parts(10_420_000, 3771) + // Minimum execution time: 10_011_000 picoseconds. + Weight::from_parts(10_476_000, 3771) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Revive::ContractInfoOf` (r:1 w:0) @@ -374,16 +376,16 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `403` // Estimated: `3868` - // Minimum execution time: 11_422_000 picoseconds. - Weight::from_parts(11_829_000, 3868) + // Minimum execution time: 11_253_000 picoseconds. + Weight::from_parts(11_642_000, 3868) .saturating_add(T::DbWeight::get().reads(1_u64)) } fn seal_own_code_hash() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 247_000 picoseconds. - Weight::from_parts(282_000, 0) + // Minimum execution time: 244_000 picoseconds. + Weight::from_parts(318_000, 0) } /// Storage: `Revive::ContractInfoOf` (r:1 w:0) /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) @@ -393,44 +395,44 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `473` // Estimated: `3938` - // Minimum execution time: 14_856_000 picoseconds. - Weight::from_parts(15_528_000, 3938) + // Minimum execution time: 14_904_000 picoseconds. + Weight::from_parts(15_281_000, 3938) .saturating_add(T::DbWeight::get().reads(2_u64)) } fn seal_caller_is_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 303_000 picoseconds. - Weight::from_parts(361_000, 0) + // Minimum execution time: 382_000 picoseconds. + Weight::from_parts(422_000, 0) } fn seal_caller_is_root() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 253_000 picoseconds. - Weight::from_parts(287_000, 0) + // Minimum execution time: 258_000 picoseconds. + Weight::from_parts(310_000, 0) } fn seal_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 231_000 picoseconds. - Weight::from_parts(263_000, 0) + // Minimum execution time: 283_000 picoseconds. + Weight::from_parts(315_000, 0) } fn seal_weight_left() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 628_000 picoseconds. - Weight::from_parts(697_000, 0) + // Minimum execution time: 637_000 picoseconds. + Weight::from_parts(726_000, 0) } fn seal_balance() -> Weight { // Proof Size summary in bytes: // Measured: `103` // Estimated: `0` - // Minimum execution time: 4_531_000 picoseconds. - Weight::from_parts(4_726_000, 0) + // Minimum execution time: 4_649_000 picoseconds. 
+ Weight::from_parts(4_860_000, 0) } /// Storage: `Revive::AddressSuffix` (r:1 w:0) /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) @@ -440,8 +442,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `264` // Estimated: `3729` - // Minimum execution time: 8_787_000 picoseconds. - Weight::from_parts(9_175_000, 3729) + // Minimum execution time: 9_053_000 picoseconds. + Weight::from_parts(9_480_000, 3729) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: `Revive::ImmutableDataOf` (r:1 w:0) @@ -451,10 +453,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `238 + n * (1 ±0)` // Estimated: `3703 + n * (1 ±0)` - // Minimum execution time: 5_760_000 picoseconds. - Weight::from_parts(6_591_336, 3703) - // Standard Error: 4 - .saturating_add(Weight::from_parts(628, 0).saturating_mul(n.into())) + // Minimum execution time: 5_991_000 picoseconds. + Weight::from_parts(6_760_389, 3703) + // Standard Error: 5 + .saturating_add(Weight::from_parts(627, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -465,32 +467,39 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_971_000 picoseconds. - Weight::from_parts(2_206_252, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(529, 0).saturating_mul(n.into())) + // Minimum execution time: 2_062_000 picoseconds. + Weight::from_parts(2_277_051, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(530, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().writes(1_u64)) } fn seal_value_transferred() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 246_000 picoseconds. - Weight::from_parts(279_000, 0) + // Minimum execution time: 267_000 picoseconds. + Weight::from_parts(299_000, 0) } fn seal_minimum_balance() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 223_000 picoseconds. - Weight::from_parts(274_000, 0) + // Minimum execution time: 263_000 picoseconds. + Weight::from_parts(318_000, 0) + } + fn seal_call_data_size() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 264_000 picoseconds. + Weight::from_parts(303_000, 0) } fn seal_block_number() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 213_000 picoseconds. - Weight::from_parts(270_000, 0) + // Minimum execution time: 267_000 picoseconds. + Weight::from_parts(296_000, 0) } /// Storage: `System::BlockHash` (r:1 w:0) /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) @@ -498,41 +507,48 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `30` // Estimated: `3495` - // Minimum execution time: 3_502_000 picoseconds. - Weight::from_parts(3_777_000, 3495) + // Minimum execution time: 3_622_000 picoseconds. + Weight::from_parts(3_794_000, 3495) .saturating_add(T::DbWeight::get().reads(1_u64)) } fn seal_now() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 232_000 picoseconds. - Weight::from_parts(277_000, 0) + // Minimum execution time: 244_000 picoseconds. 
+ Weight::from_parts(298_000, 0) } fn seal_weight_to_fee() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_293_000 picoseconds. - Weight::from_parts(1_426_000, 0) + // Minimum execution time: 1_340_000 picoseconds. + Weight::from_parts(1_483_000, 0) + } + fn seal_call_data_load() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 244_000 picoseconds. + Weight::from_parts(295_000, 0) } /// The range of component `n` is `[0, 262140]`. fn seal_input(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 449_000 picoseconds. - Weight::from_parts(446_268, 0) + // Minimum execution time: 475_000 picoseconds. + Weight::from_parts(427_145, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(113, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(114, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 262140]`. fn seal_return(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 244_000 picoseconds. - Weight::from_parts(612_733, 0) + // Minimum execution time: 291_000 picoseconds. + Weight::from_parts(846_264, 0) // Standard Error: 0 .saturating_add(Weight::from_parts(200, 0).saturating_mul(n.into())) } @@ -550,11 +566,11 @@ impl WeightInfo for SubstrateWeight { fn seal_terminate(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `324 + n * (88 ±0)` - // Estimated: `3789 + n * (2563 ±0)` - // Minimum execution time: 21_822_000 picoseconds. - Weight::from_parts(22_468_601, 3789) - // Standard Error: 7_303 - .saturating_add(Weight::from_parts(4_138_073, 0).saturating_mul(n.into())) + // Estimated: `3791 + n * (2563 ±0)` + // Minimum execution time: 22_494_000 picoseconds. + Weight::from_parts(23_028_153, 3791) + // Standard Error: 12_407 + .saturating_add(Weight::from_parts(4_238_442, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) @@ -567,22 +583,22 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_127_000 picoseconds. - Weight::from_parts(4_043_097, 0) - // Standard Error: 3_136 - .saturating_add(Weight::from_parts(209_603, 0).saturating_mul(t.into())) - // Standard Error: 28 - .saturating_add(Weight::from_parts(988, 0).saturating_mul(n.into())) + // Minimum execution time: 4_383_000 picoseconds. + Weight::from_parts(4_364_292, 0) + // Standard Error: 2_775 + .saturating_add(Weight::from_parts(210_189, 0).saturating_mul(t.into())) + // Standard Error: 24 + .saturating_add(Weight::from_parts(952, 0).saturating_mul(n.into())) } /// The range of component `i` is `[0, 262144]`. fn seal_debug_message(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 276_000 picoseconds. - Weight::from_parts(1_111_301, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(706, 0).saturating_mul(i.into())) + // Minimum execution time: 328_000 picoseconds. 
+ Weight::from_parts(393_925, 0) + // Standard Error: 1 + .saturating_add(Weight::from_parts(725, 0).saturating_mul(i.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -590,8 +606,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `744` // Estimated: `744` - // Minimum execution time: 7_869_000 picoseconds. - Weight::from_parts(8_190_000, 744) + // Minimum execution time: 7_649_000 picoseconds. + Weight::from_parts(8_025_000, 744) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -600,8 +616,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `10754` // Estimated: `10754` - // Minimum execution time: 42_793_000 picoseconds. - Weight::from_parts(43_861_000, 10754) + // Minimum execution time: 43_439_000 picoseconds. + Weight::from_parts(44_296_000, 10754) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -610,8 +626,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `744` // Estimated: `744` - // Minimum execution time: 8_753_000 picoseconds. - Weight::from_parts(9_235_000, 744) + // Minimum execution time: 8_919_000 picoseconds. + Weight::from_parts(9_392_000, 744) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -621,8 +637,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `10754` // Estimated: `10754` - // Minimum execution time: 44_446_000 picoseconds. - Weight::from_parts(45_586_000, 10754) + // Minimum execution time: 45_032_000 picoseconds. + Weight::from_parts(46_050_000, 10754) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -634,12 +650,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + o * (1 ±0)` // Estimated: `247 + o * (1 ±0)` - // Minimum execution time: 9_214_000 picoseconds. - Weight::from_parts(9_888_060, 247) - // Standard Error: 41 - .saturating_add(Weight::from_parts(151, 0).saturating_mul(n.into())) - // Standard Error: 41 - .saturating_add(Weight::from_parts(315, 0).saturating_mul(o.into())) + // Minimum execution time: 9_272_000 picoseconds. + Weight::from_parts(10_022_838, 247) + // Standard Error: 43 + .saturating_add(Weight::from_parts(513, 0).saturating_mul(n.into())) + // Standard Error: 43 + .saturating_add(Weight::from_parts(625, 0).saturating_mul(o.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(o.into())) @@ -651,10 +667,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `247 + n * (1 ±0)` - // Minimum execution time: 8_647_000 picoseconds. - Weight::from_parts(9_553_009, 247) - // Standard Error: 48 - .saturating_add(Weight::from_parts(651, 0).saturating_mul(n.into())) + // Minimum execution time: 8_885_000 picoseconds. 
+ Weight::from_parts(9_785_932, 247) + // Standard Error: 55 + .saturating_add(Weight::from_parts(612, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -666,10 +682,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `247 + n * (1 ±0)` - // Minimum execution time: 8_457_000 picoseconds. - Weight::from_parts(9_199_745, 247) - // Standard Error: 59 - .saturating_add(Weight::from_parts(1_562, 0).saturating_mul(n.into())) + // Minimum execution time: 8_440_000 picoseconds. + Weight::from_parts(9_453_769, 247) + // Standard Error: 62 + .saturating_add(Weight::from_parts(1_529, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -680,10 +696,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `247 + n * (1 ±0)` - // Minimum execution time: 8_025_000 picoseconds. - Weight::from_parts(8_700_911, 247) - // Standard Error: 49 - .saturating_add(Weight::from_parts(635, 0).saturating_mul(n.into())) + // Minimum execution time: 8_212_000 picoseconds. + Weight::from_parts(8_880_676, 247) + // Standard Error: 54 + .saturating_add(Weight::from_parts(673, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -694,10 +710,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `247 + n * (1 ±0)` - // Minimum execution time: 9_346_000 picoseconds. - Weight::from_parts(10_297_284, 247) - // Standard Error: 62 - .saturating_add(Weight::from_parts(1_396, 0).saturating_mul(n.into())) + // Minimum execution time: 9_491_000 picoseconds. + Weight::from_parts(10_313_570, 247) + // Standard Error: 65 + .saturating_add(Weight::from_parts(1_681, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -706,36 +722,36 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_428_000 picoseconds. - Weight::from_parts(1_517_000, 0) + // Minimum execution time: 1_530_000 picoseconds. + Weight::from_parts(1_642_000, 0) } fn set_transient_storage_full() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_868_000 picoseconds. - Weight::from_parts(1_942_000, 0) + // Minimum execution time: 1_851_000 picoseconds. + Weight::from_parts(1_999_000, 0) } fn get_transient_storage_empty() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_403_000 picoseconds. - Weight::from_parts(1_539_000, 0) + // Minimum execution time: 1_429_000 picoseconds. + Weight::from_parts(1_527_000, 0) } fn get_transient_storage_full() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_676_000 picoseconds. - Weight::from_parts(1_760_000, 0) + // Minimum execution time: 1_689_000 picoseconds. 
+ Weight::from_parts(1_772_000, 0) } fn rollback_transient_storage() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_119_000 picoseconds. - Weight::from_parts(1_205_000, 0) + // Minimum execution time: 1_049_000 picoseconds. + Weight::from_parts(1_153_000, 0) } /// The range of component `n` is `[0, 512]`. /// The range of component `o` is `[0, 512]`. @@ -743,50 +759,52 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_146_000 picoseconds. - Weight::from_parts(2_315_339, 0) - // Standard Error: 13 - .saturating_add(Weight::from_parts(327, 0).saturating_mul(n.into())) - // Standard Error: 13 - .saturating_add(Weight::from_parts(366, 0).saturating_mul(o.into())) + // Minimum execution time: 2_338_000 picoseconds. + Weight::from_parts(2_514_685, 0) + // Standard Error: 15 + .saturating_add(Weight::from_parts(299, 0).saturating_mul(n.into())) + // Standard Error: 15 + .saturating_add(Weight::from_parts(403, 0).saturating_mul(o.into())) } /// The range of component `n` is `[0, 512]`. fn seal_clear_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_950_000 picoseconds. - Weight::from_parts(2_271_073, 0) - // Standard Error: 15 - .saturating_add(Weight::from_parts(373, 0).saturating_mul(n.into())) + // Minimum execution time: 2_045_000 picoseconds. + Weight::from_parts(2_409_843, 0) + // Standard Error: 16 + .saturating_add(Weight::from_parts(350, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 512]`. fn seal_get_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_839_000 picoseconds. - Weight::from_parts(2_049_659, 0) - // Standard Error: 14 - .saturating_add(Weight::from_parts(291, 0).saturating_mul(n.into())) + // Minimum execution time: 1_891_000 picoseconds. + Weight::from_parts(2_117_702, 0) + // Standard Error: 12 + .saturating_add(Weight::from_parts(289, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 512]`. fn seal_contains_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_716_000 picoseconds. - Weight::from_parts(1_893_932, 0) - // Standard Error: 12 - .saturating_add(Weight::from_parts(172, 0).saturating_mul(n.into())) + // Minimum execution time: 1_786_000 picoseconds. + Weight::from_parts(1_949_290, 0) + // Standard Error: 11 + .saturating_add(Weight::from_parts(232, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 512]`. - fn seal_take_transient_storage(_n: u32, ) -> Weight { + fn seal_take_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_448_000 picoseconds. - Weight::from_parts(2_676_764, 0) + // Minimum execution time: 2_465_000 picoseconds. + Weight::from_parts(2_712_107, 0) + // Standard Error: 14 + .saturating_add(Weight::from_parts(79, 0).saturating_mul(n.into())) } /// Storage: `Revive::AddressSuffix` (r:1 w:0) /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) @@ -802,12 +820,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `i` is `[0, 262144]`. 
fn seal_call(t: u32, i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1294 + t * (242 ±0)` + // Measured: `1294 + t * (243 ±0)` // Estimated: `4759 + t * (2501 ±0)` - // Minimum execution time: 39_786_000 picoseconds. - Weight::from_parts(41_175_457, 4759) - // Standard Error: 45_251 - .saturating_add(Weight::from_parts(2_375_617, 0).saturating_mul(t.into())) + // Minimum execution time: 41_377_000 picoseconds. + Weight::from_parts(43_024_676, 4759) + // Standard Error: 44_099 + .saturating_add(Weight::from_parts(1_689_315, 0).saturating_mul(t.into())) // Standard Error: 0 .saturating_add(Weight::from_parts(2, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) @@ -815,17 +833,19 @@ impl WeightInfo for SubstrateWeight { .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 2501).saturating_mul(t.into())) } + /// Storage: `Revive::ContractInfoOf` (r:1 w:0) + /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) /// Storage: `Revive::CodeInfoOf` (r:1 w:0) /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) /// Storage: `Revive::PristineCode` (r:1 w:0) /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) fn seal_delegate_call() -> Weight { // Proof Size summary in bytes: - // Measured: `1064` - // Estimated: `4529` - // Minimum execution time: 29_762_000 picoseconds. - Weight::from_parts(31_345_000, 4529) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Measured: `1237` + // Estimated: `4702` + // Minimum execution time: 36_324_000 picoseconds. + Weight::from_parts(37_657_000, 4702) + .saturating_add(T::DbWeight::get().reads(3_u64)) } /// Storage: `Revive::CodeInfoOf` (r:1 w:1) /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) @@ -838,12 +858,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `i` is `[0, 262144]`. fn seal_instantiate(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1310` - // Estimated: `4748` - // Minimum execution time: 117_791_000 picoseconds. - Weight::from_parts(105_413_907, 4748) + // Measured: `1273` + // Estimated: `4736` + // Minimum execution time: 117_657_000 picoseconds. + Weight::from_parts(110_177_403, 4736) // Standard Error: 11 - .saturating_add(Weight::from_parts(4_038, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(4_097, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -852,64 +872,64 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 638_000 picoseconds. - Weight::from_parts(4_703_710, 0) + // Minimum execution time: 650_000 picoseconds. + Weight::from_parts(4_208_007, 0) // Standard Error: 3 - .saturating_add(Weight::from_parts(1_349, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_396, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 262144]`. fn seal_hash_keccak_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_085_000 picoseconds. - Weight::from_parts(3_630_716, 0) + // Minimum execution time: 1_101_000 picoseconds. 
+ Weight::from_parts(4_521_803, 0) // Standard Error: 3 - .saturating_add(Weight::from_parts(3_567, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(3_609, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 262144]`. fn seal_hash_blake2_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 643_000 picoseconds. - Weight::from_parts(3_733_026, 0) + // Minimum execution time: 654_000 picoseconds. + Weight::from_parts(3_060_461, 0) // Standard Error: 3 - .saturating_add(Weight::from_parts(1_492, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_531, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 262144]`. fn seal_hash_blake2_128(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 653_000 picoseconds. - Weight::from_parts(4_627_285, 0) + // Minimum execution time: 628_000 picoseconds. + Weight::from_parts(3_784_567, 0) // Standard Error: 3 - .saturating_add(Weight::from_parts(1_478, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_526, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 261889]`. fn seal_sr25519_verify(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 45_786_000 picoseconds. - Weight::from_parts(36_383_470, 0) - // Standard Error: 10 - .saturating_add(Weight::from_parts(5_396, 0).saturating_mul(n.into())) + // Minimum execution time: 42_892_000 picoseconds. + Weight::from_parts(25_002_714, 0) + // Standard Error: 12 + .saturating_add(Weight::from_parts(5_252, 0).saturating_mul(n.into())) } fn seal_ecdsa_recover() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 48_140_000 picoseconds. - Weight::from_parts(49_720_000, 0) + // Minimum execution time: 46_990_000 picoseconds. + Weight::from_parts(48_960_000, 0) } fn seal_ecdsa_to_eth_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 12_565_000 picoseconds. - Weight::from_parts(12_704_000, 0) + // Minimum execution time: 12_870_000 picoseconds. + Weight::from_parts(13_062_000, 0) } /// Storage: `Revive::CodeInfoOf` (r:1 w:1) /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) @@ -917,8 +937,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `300` // Estimated: `3765` - // Minimum execution time: 17_208_000 picoseconds. - Weight::from_parts(18_307_000, 3765) + // Minimum execution time: 17_810_000 picoseconds. + Weight::from_parts(18_667_000, 3765) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -928,8 +948,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `338` // Estimated: `3803` - // Minimum execution time: 13_686_000 picoseconds. - Weight::from_parts(14_186_000, 3803) + // Minimum execution time: 13_762_000 picoseconds. + Weight::from_parts(14_526_000, 3803) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -939,8 +959,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `338` // Estimated: `3561` - // Minimum execution time: 12_381_000 picoseconds. 
- Weight::from_parts(13_208_000, 3561) + // Minimum execution time: 12_753_000 picoseconds. + Weight::from_parts(13_199_000, 3561) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -949,10 +969,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_118_000 picoseconds. - Weight::from_parts(9_813_514, 0) - // Standard Error: 40 - .saturating_add(Weight::from_parts(71_154, 0).saturating_mul(r.into())) + // Minimum execution time: 9_060_000 picoseconds. + Weight::from_parts(10_131_024, 0) + // Standard Error: 72 + .saturating_add(Weight::from_parts(71_842, 0).saturating_mul(r.into())) } } @@ -964,8 +984,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `109` // Estimated: `1594` - // Minimum execution time: 2_818_000 picoseconds. - Weight::from_parts(3_058_000, 1594) + // Minimum execution time: 2_921_000 picoseconds. + Weight::from_parts(3_048_000, 1594) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -975,10 +995,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `425 + k * (69 ±0)` // Estimated: `415 + k * (70 ±0)` - // Minimum execution time: 15_916_000 picoseconds. - Weight::from_parts(16_132_000, 415) - // Standard Error: 1_482 - .saturating_add(Weight::from_parts(1_185_583, 0).saturating_mul(k.into())) + // Minimum execution time: 16_060_000 picoseconds. + Weight::from_parts(3_234_033, 415) + // Standard Error: 1_160 + .saturating_add(Weight::from_parts(1_184_188, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -1000,10 +1020,10 @@ impl WeightInfo for () { /// The range of component `c` is `[0, 262144]`. fn call_with_code_per_byte(_c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1502` - // Estimated: `7442` - // Minimum execution time: 88_115_000 picoseconds. - Weight::from_parts(92_075_651, 7442) + // Measured: `1536` + // Estimated: `7476` + // Minimum execution time: 93_624_000 picoseconds. + Weight::from_parts(98_332_129, 7476) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1025,14 +1045,14 @@ impl WeightInfo for () { /// The range of component `i` is `[0, 262144]`. fn instantiate_with_code(c: u32, i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `403` - // Estimated: `6326` - // Minimum execution time: 188_274_000 picoseconds. - Weight::from_parts(157_773_869, 6326) - // Standard Error: 11 - .saturating_add(Weight::from_parts(16, 0).saturating_mul(c.into())) - // Standard Error: 11 - .saturating_add(Weight::from_parts(4_464, 0).saturating_mul(i.into())) + // Measured: `416` + // Estimated: `6345` + // Minimum execution time: 196_202_000 picoseconds. 
+ Weight::from_parts(169_823_092, 6345) + // Standard Error: 10 + .saturating_add(Weight::from_parts(30, 0).saturating_mul(c.into())) + // Standard Error: 10 + .saturating_add(Weight::from_parts(4_487, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -1054,11 +1074,11 @@ impl WeightInfo for () { fn instantiate(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `1296` - // Estimated: `4739` - // Minimum execution time: 158_616_000 picoseconds. - Weight::from_parts(134_329_076, 4739) - // Standard Error: 15 - .saturating_add(Weight::from_parts(4_358, 0).saturating_mul(i.into())) + // Estimated: `4753` + // Minimum execution time: 162_423_000 picoseconds. + Weight::from_parts(144_467_590, 4753) + // Standard Error: 16 + .saturating_add(Weight::from_parts(4_405, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -1076,10 +1096,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) fn call() -> Weight { // Proof Size summary in bytes: - // Measured: `1502` - // Estimated: `7442` - // Minimum execution time: 134_935_000 picoseconds. - Weight::from_parts(141_040_000, 7442) + // Measured: `1536` + // Estimated: `7476` + // Minimum execution time: 144_454_000 picoseconds. + Weight::from_parts(151_756_000, 7476) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1094,8 +1114,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `109` // Estimated: `3574` - // Minimum execution time: 51_026_000 picoseconds. - Weight::from_parts(53_309_143, 3574) + // Minimum execution time: 50_712_000 picoseconds. + Weight::from_parts(52_831_382, 3574) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1109,8 +1129,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `285` // Estimated: `3750` - // Minimum execution time: 44_338_000 picoseconds. - Weight::from_parts(45_398_000, 3750) + // Minimum execution time: 44_441_000 picoseconds. + Weight::from_parts(46_242_000, 3750) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1122,8 +1142,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `529` // Estimated: `6469` - // Minimum execution time: 26_420_000 picoseconds. - Weight::from_parts(27_141_000, 6469) + // Minimum execution time: 27_157_000 picoseconds. + Weight::from_parts(28_182_000, 6469) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1135,8 +1155,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `109` // Estimated: `3574` - // Minimum execution time: 39_735_000 picoseconds. - Weight::from_parts(41_260_000, 3574) + // Minimum execution time: 40_588_000 picoseconds. + Weight::from_parts(41_125_000, 3574) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1148,8 +1168,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `56` // Estimated: `3521` - // Minimum execution time: 32_059_000 picoseconds. - Weight::from_parts(32_776_000, 3521) + // Minimum execution time: 31_849_000 picoseconds. 
+ Weight::from_parts(32_674_000, 3521) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1161,8 +1181,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 13_553_000 picoseconds. - Weight::from_parts(14_121_000, 3610) + // Minimum execution time: 14_510_000 picoseconds. + Weight::from_parts(14_986_000, 3610) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// The range of component `r` is `[0, 1600]`. @@ -1170,24 +1190,24 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_392_000 picoseconds. - Weight::from_parts(7_692_248, 0) - // Standard Error: 105 - .saturating_add(Weight::from_parts(180_036, 0).saturating_mul(r.into())) + // Minimum execution time: 7_324_000 picoseconds. + Weight::from_parts(8_363_388, 0) + // Standard Error: 230 + .saturating_add(Weight::from_parts(170_510, 0).saturating_mul(r.into())) } fn seal_caller() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 287_000 picoseconds. - Weight::from_parts(317_000, 0) + // Minimum execution time: 275_000 picoseconds. + Weight::from_parts(326_000, 0) } fn seal_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 235_000 picoseconds. - Weight::from_parts(288_000, 0) + // Minimum execution time: 263_000 picoseconds. + Weight::from_parts(292_000, 0) } /// Storage: `Revive::ContractInfoOf` (r:1 w:0) /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) @@ -1195,8 +1215,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `306` // Estimated: `3771` - // Minimum execution time: 10_101_000 picoseconds. - Weight::from_parts(10_420_000, 3771) + // Minimum execution time: 10_011_000 picoseconds. + Weight::from_parts(10_476_000, 3771) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Revive::ContractInfoOf` (r:1 w:0) @@ -1205,16 +1225,16 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `403` // Estimated: `3868` - // Minimum execution time: 11_422_000 picoseconds. - Weight::from_parts(11_829_000, 3868) + // Minimum execution time: 11_253_000 picoseconds. + Weight::from_parts(11_642_000, 3868) .saturating_add(RocksDbWeight::get().reads(1_u64)) } fn seal_own_code_hash() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 247_000 picoseconds. - Weight::from_parts(282_000, 0) + // Minimum execution time: 244_000 picoseconds. + Weight::from_parts(318_000, 0) } /// Storage: `Revive::ContractInfoOf` (r:1 w:0) /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) @@ -1224,44 +1244,44 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `473` // Estimated: `3938` - // Minimum execution time: 14_856_000 picoseconds. - Weight::from_parts(15_528_000, 3938) + // Minimum execution time: 14_904_000 picoseconds. + Weight::from_parts(15_281_000, 3938) .saturating_add(RocksDbWeight::get().reads(2_u64)) } fn seal_caller_is_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 303_000 picoseconds. - Weight::from_parts(361_000, 0) + // Minimum execution time: 382_000 picoseconds. 
+ Weight::from_parts(422_000, 0) } fn seal_caller_is_root() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 253_000 picoseconds. - Weight::from_parts(287_000, 0) + // Minimum execution time: 258_000 picoseconds. + Weight::from_parts(310_000, 0) } fn seal_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 231_000 picoseconds. - Weight::from_parts(263_000, 0) + // Minimum execution time: 283_000 picoseconds. + Weight::from_parts(315_000, 0) } fn seal_weight_left() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 628_000 picoseconds. - Weight::from_parts(697_000, 0) + // Minimum execution time: 637_000 picoseconds. + Weight::from_parts(726_000, 0) } fn seal_balance() -> Weight { // Proof Size summary in bytes: // Measured: `103` // Estimated: `0` - // Minimum execution time: 4_531_000 picoseconds. - Weight::from_parts(4_726_000, 0) + // Minimum execution time: 4_649_000 picoseconds. + Weight::from_parts(4_860_000, 0) } /// Storage: `Revive::AddressSuffix` (r:1 w:0) /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) @@ -1271,8 +1291,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `264` // Estimated: `3729` - // Minimum execution time: 8_787_000 picoseconds. - Weight::from_parts(9_175_000, 3729) + // Minimum execution time: 9_053_000 picoseconds. + Weight::from_parts(9_480_000, 3729) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: `Revive::ImmutableDataOf` (r:1 w:0) @@ -1282,10 +1302,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `238 + n * (1 ±0)` // Estimated: `3703 + n * (1 ±0)` - // Minimum execution time: 5_760_000 picoseconds. - Weight::from_parts(6_591_336, 3703) - // Standard Error: 4 - .saturating_add(Weight::from_parts(628, 0).saturating_mul(n.into())) + // Minimum execution time: 5_991_000 picoseconds. + Weight::from_parts(6_760_389, 3703) + // Standard Error: 5 + .saturating_add(Weight::from_parts(627, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1296,32 +1316,39 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_971_000 picoseconds. - Weight::from_parts(2_206_252, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(529, 0).saturating_mul(n.into())) + // Minimum execution time: 2_062_000 picoseconds. + Weight::from_parts(2_277_051, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(530, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().writes(1_u64)) } fn seal_value_transferred() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 246_000 picoseconds. - Weight::from_parts(279_000, 0) + // Minimum execution time: 267_000 picoseconds. + Weight::from_parts(299_000, 0) } fn seal_minimum_balance() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 223_000 picoseconds. - Weight::from_parts(274_000, 0) + // Minimum execution time: 263_000 picoseconds. + Weight::from_parts(318_000, 0) + } + fn seal_call_data_size() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 264_000 picoseconds. 
+ Weight::from_parts(303_000, 0) } fn seal_block_number() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 213_000 picoseconds. - Weight::from_parts(270_000, 0) + // Minimum execution time: 267_000 picoseconds. + Weight::from_parts(296_000, 0) } /// Storage: `System::BlockHash` (r:1 w:0) /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) @@ -1329,41 +1356,48 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `30` // Estimated: `3495` - // Minimum execution time: 3_502_000 picoseconds. - Weight::from_parts(3_777_000, 3495) + // Minimum execution time: 3_622_000 picoseconds. + Weight::from_parts(3_794_000, 3495) .saturating_add(RocksDbWeight::get().reads(1_u64)) } fn seal_now() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 232_000 picoseconds. - Weight::from_parts(277_000, 0) + // Minimum execution time: 244_000 picoseconds. + Weight::from_parts(298_000, 0) } fn seal_weight_to_fee() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_293_000 picoseconds. - Weight::from_parts(1_426_000, 0) + // Minimum execution time: 1_340_000 picoseconds. + Weight::from_parts(1_483_000, 0) + } + fn seal_call_data_load() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 244_000 picoseconds. + Weight::from_parts(295_000, 0) } /// The range of component `n` is `[0, 262140]`. fn seal_input(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 449_000 picoseconds. - Weight::from_parts(446_268, 0) + // Minimum execution time: 475_000 picoseconds. + Weight::from_parts(427_145, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(113, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(114, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 262140]`. fn seal_return(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 244_000 picoseconds. - Weight::from_parts(612_733, 0) + // Minimum execution time: 291_000 picoseconds. + Weight::from_parts(846_264, 0) // Standard Error: 0 .saturating_add(Weight::from_parts(200, 0).saturating_mul(n.into())) } @@ -1381,11 +1415,11 @@ impl WeightInfo for () { fn seal_terminate(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `324 + n * (88 ±0)` - // Estimated: `3789 + n * (2563 ±0)` - // Minimum execution time: 21_822_000 picoseconds. - Weight::from_parts(22_468_601, 3789) - // Standard Error: 7_303 - .saturating_add(Weight::from_parts(4_138_073, 0).saturating_mul(n.into())) + // Estimated: `3791 + n * (2563 ±0)` + // Minimum execution time: 22_494_000 picoseconds. + Weight::from_parts(23_028_153, 3791) + // Standard Error: 12_407 + .saturating_add(Weight::from_parts(4_238_442, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) @@ -1398,22 +1432,22 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_127_000 picoseconds. 
- Weight::from_parts(4_043_097, 0) - // Standard Error: 3_136 - .saturating_add(Weight::from_parts(209_603, 0).saturating_mul(t.into())) - // Standard Error: 28 - .saturating_add(Weight::from_parts(988, 0).saturating_mul(n.into())) + // Minimum execution time: 4_383_000 picoseconds. + Weight::from_parts(4_364_292, 0) + // Standard Error: 2_775 + .saturating_add(Weight::from_parts(210_189, 0).saturating_mul(t.into())) + // Standard Error: 24 + .saturating_add(Weight::from_parts(952, 0).saturating_mul(n.into())) } /// The range of component `i` is `[0, 262144]`. fn seal_debug_message(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 276_000 picoseconds. - Weight::from_parts(1_111_301, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(706, 0).saturating_mul(i.into())) + // Minimum execution time: 328_000 picoseconds. + Weight::from_parts(393_925, 0) + // Standard Error: 1 + .saturating_add(Weight::from_parts(725, 0).saturating_mul(i.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -1421,8 +1455,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `744` // Estimated: `744` - // Minimum execution time: 7_869_000 picoseconds. - Weight::from_parts(8_190_000, 744) + // Minimum execution time: 7_649_000 picoseconds. + Weight::from_parts(8_025_000, 744) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -1431,8 +1465,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `10754` // Estimated: `10754` - // Minimum execution time: 42_793_000 picoseconds. - Weight::from_parts(43_861_000, 10754) + // Minimum execution time: 43_439_000 picoseconds. + Weight::from_parts(44_296_000, 10754) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -1441,8 +1475,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `744` // Estimated: `744` - // Minimum execution time: 8_753_000 picoseconds. - Weight::from_parts(9_235_000, 744) + // Minimum execution time: 8_919_000 picoseconds. + Weight::from_parts(9_392_000, 744) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1452,8 +1486,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `10754` // Estimated: `10754` - // Minimum execution time: 44_446_000 picoseconds. - Weight::from_parts(45_586_000, 10754) + // Minimum execution time: 45_032_000 picoseconds. + Weight::from_parts(46_050_000, 10754) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1465,12 +1499,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + o * (1 ±0)` // Estimated: `247 + o * (1 ±0)` - // Minimum execution time: 9_214_000 picoseconds. - Weight::from_parts(9_888_060, 247) - // Standard Error: 41 - .saturating_add(Weight::from_parts(151, 0).saturating_mul(n.into())) - // Standard Error: 41 - .saturating_add(Weight::from_parts(315, 0).saturating_mul(o.into())) + // Minimum execution time: 9_272_000 picoseconds. 
+ Weight::from_parts(10_022_838, 247) + // Standard Error: 43 + .saturating_add(Weight::from_parts(513, 0).saturating_mul(n.into())) + // Standard Error: 43 + .saturating_add(Weight::from_parts(625, 0).saturating_mul(o.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(o.into())) @@ -1482,10 +1516,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `247 + n * (1 ±0)` - // Minimum execution time: 8_647_000 picoseconds. - Weight::from_parts(9_553_009, 247) - // Standard Error: 48 - .saturating_add(Weight::from_parts(651, 0).saturating_mul(n.into())) + // Minimum execution time: 8_885_000 picoseconds. + Weight::from_parts(9_785_932, 247) + // Standard Error: 55 + .saturating_add(Weight::from_parts(612, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -1497,10 +1531,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `247 + n * (1 ±0)` - // Minimum execution time: 8_457_000 picoseconds. - Weight::from_parts(9_199_745, 247) - // Standard Error: 59 - .saturating_add(Weight::from_parts(1_562, 0).saturating_mul(n.into())) + // Minimum execution time: 8_440_000 picoseconds. + Weight::from_parts(9_453_769, 247) + // Standard Error: 62 + .saturating_add(Weight::from_parts(1_529, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1511,10 +1545,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `247 + n * (1 ±0)` - // Minimum execution time: 8_025_000 picoseconds. - Weight::from_parts(8_700_911, 247) - // Standard Error: 49 - .saturating_add(Weight::from_parts(635, 0).saturating_mul(n.into())) + // Minimum execution time: 8_212_000 picoseconds. + Weight::from_parts(8_880_676, 247) + // Standard Error: 54 + .saturating_add(Weight::from_parts(673, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1525,10 +1559,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `247 + n * (1 ±0)` - // Minimum execution time: 9_346_000 picoseconds. - Weight::from_parts(10_297_284, 247) - // Standard Error: 62 - .saturating_add(Weight::from_parts(1_396, 0).saturating_mul(n.into())) + // Minimum execution time: 9_491_000 picoseconds. + Weight::from_parts(10_313_570, 247) + // Standard Error: 65 + .saturating_add(Weight::from_parts(1_681, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -1537,36 +1571,36 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_428_000 picoseconds. - Weight::from_parts(1_517_000, 0) + // Minimum execution time: 1_530_000 picoseconds. + Weight::from_parts(1_642_000, 0) } fn set_transient_storage_full() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_868_000 picoseconds. 
- Weight::from_parts(1_942_000, 0) + // Minimum execution time: 1_851_000 picoseconds. + Weight::from_parts(1_999_000, 0) } fn get_transient_storage_empty() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_403_000 picoseconds. - Weight::from_parts(1_539_000, 0) + // Minimum execution time: 1_429_000 picoseconds. + Weight::from_parts(1_527_000, 0) } fn get_transient_storage_full() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_676_000 picoseconds. - Weight::from_parts(1_760_000, 0) + // Minimum execution time: 1_689_000 picoseconds. + Weight::from_parts(1_772_000, 0) } fn rollback_transient_storage() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_119_000 picoseconds. - Weight::from_parts(1_205_000, 0) + // Minimum execution time: 1_049_000 picoseconds. + Weight::from_parts(1_153_000, 0) } /// The range of component `n` is `[0, 512]`. /// The range of component `o` is `[0, 512]`. @@ -1574,50 +1608,52 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_146_000 picoseconds. - Weight::from_parts(2_315_339, 0) - // Standard Error: 13 - .saturating_add(Weight::from_parts(327, 0).saturating_mul(n.into())) - // Standard Error: 13 - .saturating_add(Weight::from_parts(366, 0).saturating_mul(o.into())) + // Minimum execution time: 2_338_000 picoseconds. + Weight::from_parts(2_514_685, 0) + // Standard Error: 15 + .saturating_add(Weight::from_parts(299, 0).saturating_mul(n.into())) + // Standard Error: 15 + .saturating_add(Weight::from_parts(403, 0).saturating_mul(o.into())) } /// The range of component `n` is `[0, 512]`. fn seal_clear_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_950_000 picoseconds. - Weight::from_parts(2_271_073, 0) - // Standard Error: 15 - .saturating_add(Weight::from_parts(373, 0).saturating_mul(n.into())) + // Minimum execution time: 2_045_000 picoseconds. + Weight::from_parts(2_409_843, 0) + // Standard Error: 16 + .saturating_add(Weight::from_parts(350, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 512]`. fn seal_get_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_839_000 picoseconds. - Weight::from_parts(2_049_659, 0) - // Standard Error: 14 - .saturating_add(Weight::from_parts(291, 0).saturating_mul(n.into())) + // Minimum execution time: 1_891_000 picoseconds. + Weight::from_parts(2_117_702, 0) + // Standard Error: 12 + .saturating_add(Weight::from_parts(289, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 512]`. fn seal_contains_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_716_000 picoseconds. - Weight::from_parts(1_893_932, 0) - // Standard Error: 12 - .saturating_add(Weight::from_parts(172, 0).saturating_mul(n.into())) + // Minimum execution time: 1_786_000 picoseconds. + Weight::from_parts(1_949_290, 0) + // Standard Error: 11 + .saturating_add(Weight::from_parts(232, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 512]`. 
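> **Editor's note (not part of the patch):** the regenerated entries above are consumed as simple affine formulas. A minimal sketch of how a two-component entry such as `seal_set_transient_storage` directly above evaluates, using the new constants; it assumes only the `sp_weights` crate (generated files usually reach `Weight` via the `frame_support::weights` re-export instead):

```rust
// Minimal sketch, not pallet code: evaluating the regenerated
// two-component weight for `seal_set_transient_storage(n, o)`.
use sp_weights::Weight;

fn seal_set_transient_storage(n: u32, o: u32) -> Weight {
    // Base cost: 2_514_685 ps of ref_time; no proof size is charged here.
    Weight::from_parts(2_514_685, 0)
        // Plus 299 ps per unit of component `n` (range [0, 512]).
        .saturating_add(Weight::from_parts(299, 0).saturating_mul(n.into()))
        // Plus 403 ps per unit of component `o` (range [0, 512]).
        .saturating_add(Weight::from_parts(403, 0).saturating_mul(o.into()))
}
```

The saturating arithmetic means a pathological constant clamps at `u64::MAX` instead of overflowing inside the runtime.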
- fn seal_take_transient_storage(_n: u32, ) -> Weight { + fn seal_take_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_448_000 picoseconds. - Weight::from_parts(2_676_764, 0) + // Minimum execution time: 2_465_000 picoseconds. + Weight::from_parts(2_712_107, 0) + // Standard Error: 14 + .saturating_add(Weight::from_parts(79, 0).saturating_mul(n.into())) } /// Storage: `Revive::AddressSuffix` (r:1 w:0) /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) @@ -1633,12 +1669,12 @@ impl WeightInfo for () { /// The range of component `i` is `[0, 262144]`. fn seal_call(t: u32, i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1294 + t * (242 ±0)` + // Measured: `1294 + t * (243 ±0)` // Estimated: `4759 + t * (2501 ±0)` - // Minimum execution time: 39_786_000 picoseconds. - Weight::from_parts(41_175_457, 4759) - // Standard Error: 45_251 - .saturating_add(Weight::from_parts(2_375_617, 0).saturating_mul(t.into())) + // Minimum execution time: 41_377_000 picoseconds. + Weight::from_parts(43_024_676, 4759) + // Standard Error: 44_099 + .saturating_add(Weight::from_parts(1_689_315, 0).saturating_mul(t.into())) // Standard Error: 0 .saturating_add(Weight::from_parts(2, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) @@ -1646,17 +1682,19 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 2501).saturating_mul(t.into())) } + /// Storage: `Revive::ContractInfoOf` (r:1 w:0) + /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) /// Storage: `Revive::CodeInfoOf` (r:1 w:0) /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) /// Storage: `Revive::PristineCode` (r:1 w:0) /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) fn seal_delegate_call() -> Weight { // Proof Size summary in bytes: - // Measured: `1064` - // Estimated: `4529` - // Minimum execution time: 29_762_000 picoseconds. - Weight::from_parts(31_345_000, 4529) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + // Measured: `1237` + // Estimated: `4702` + // Minimum execution time: 36_324_000 picoseconds. + Weight::from_parts(37_657_000, 4702) + .saturating_add(RocksDbWeight::get().reads(3_u64)) } /// Storage: `Revive::CodeInfoOf` (r:1 w:1) /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) @@ -1669,12 +1707,12 @@ impl WeightInfo for () { /// The range of component `i` is `[0, 262144]`. fn seal_instantiate(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1310` - // Estimated: `4748` - // Minimum execution time: 117_791_000 picoseconds. - Weight::from_parts(105_413_907, 4748) + // Measured: `1273` + // Estimated: `4736` + // Minimum execution time: 117_657_000 picoseconds. 
+ Weight::from_parts(110_177_403, 4736) // Standard Error: 11 - .saturating_add(Weight::from_parts(4_038, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(4_097, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1683,64 +1721,64 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 638_000 picoseconds. - Weight::from_parts(4_703_710, 0) + // Minimum execution time: 650_000 picoseconds. + Weight::from_parts(4_208_007, 0) // Standard Error: 3 - .saturating_add(Weight::from_parts(1_349, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_396, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 262144]`. fn seal_hash_keccak_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_085_000 picoseconds. - Weight::from_parts(3_630_716, 0) + // Minimum execution time: 1_101_000 picoseconds. + Weight::from_parts(4_521_803, 0) // Standard Error: 3 - .saturating_add(Weight::from_parts(3_567, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(3_609, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 262144]`. fn seal_hash_blake2_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 643_000 picoseconds. - Weight::from_parts(3_733_026, 0) + // Minimum execution time: 654_000 picoseconds. + Weight::from_parts(3_060_461, 0) // Standard Error: 3 - .saturating_add(Weight::from_parts(1_492, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_531, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 262144]`. fn seal_hash_blake2_128(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 653_000 picoseconds. - Weight::from_parts(4_627_285, 0) + // Minimum execution time: 628_000 picoseconds. + Weight::from_parts(3_784_567, 0) // Standard Error: 3 - .saturating_add(Weight::from_parts(1_478, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_526, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 261889]`. fn seal_sr25519_verify(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 45_786_000 picoseconds. - Weight::from_parts(36_383_470, 0) - // Standard Error: 10 - .saturating_add(Weight::from_parts(5_396, 0).saturating_mul(n.into())) + // Minimum execution time: 42_892_000 picoseconds. + Weight::from_parts(25_002_714, 0) + // Standard Error: 12 + .saturating_add(Weight::from_parts(5_252, 0).saturating_mul(n.into())) } fn seal_ecdsa_recover() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 48_140_000 picoseconds. - Weight::from_parts(49_720_000, 0) + // Minimum execution time: 46_990_000 picoseconds. + Weight::from_parts(48_960_000, 0) } fn seal_ecdsa_to_eth_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 12_565_000 picoseconds. - Weight::from_parts(12_704_000, 0) + // Minimum execution time: 12_870_000 picoseconds. 
+ Weight::from_parts(13_062_000, 0) } /// Storage: `Revive::CodeInfoOf` (r:1 w:1) /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) @@ -1748,8 +1786,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `300` // Estimated: `3765` - // Minimum execution time: 17_208_000 picoseconds. - Weight::from_parts(18_307_000, 3765) + // Minimum execution time: 17_810_000 picoseconds. + Weight::from_parts(18_667_000, 3765) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1759,8 +1797,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `338` // Estimated: `3803` - // Minimum execution time: 13_686_000 picoseconds. - Weight::from_parts(14_186_000, 3803) + // Minimum execution time: 13_762_000 picoseconds. + Weight::from_parts(14_526_000, 3803) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1770,8 +1808,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `338` // Estimated: `3561` - // Minimum execution time: 12_381_000 picoseconds. - Weight::from_parts(13_208_000, 3561) + // Minimum execution time: 12_753_000 picoseconds. + Weight::from_parts(13_199_000, 3561) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1780,9 +1818,9 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_118_000 picoseconds. - Weight::from_parts(9_813_514, 0) - // Standard Error: 40 - .saturating_add(Weight::from_parts(71_154, 0).saturating_mul(r.into())) + // Minimum execution time: 9_060_000 picoseconds. + Weight::from_parts(10_131_024, 0) + // Standard Error: 72 + .saturating_add(Weight::from_parts(71_842, 0).saturating_mul(r.into())) } } diff --git a/substrate/frame/revive/uapi/Cargo.toml b/substrate/frame/revive/uapi/Cargo.toml index 0c7461a35d69..1af5b327dfc7 100644 --- a/substrate/frame/revive/uapi/Cargo.toml +++ b/substrate/frame/revive/uapi/Cargo.toml @@ -20,12 +20,13 @@ codec = { features = [ "max-encoded-len", ], optional = true, workspace = true } -[target.'cfg(target_arch = "riscv32")'.dependencies] -polkavm-derive = { version = "0.14.0" } +[target.'cfg(target_arch = "riscv64")'.dependencies] +polkavm-derive = { version = "0.17.0" } [package.metadata.docs.rs] -default-target = ["wasm32-unknown-unknown"] +default-target = ["riscv64imac-unknown-none-elf"] [features] default = ["scale"] scale = ["dep:codec", "scale-info"] +unstable-api = [] diff --git a/substrate/frame/revive/uapi/src/host.rs b/substrate/frame/revive/uapi/src/host.rs index 6b3a8b07f040..aa3203697898 100644 --- a/substrate/frame/revive/uapi/src/host.rs +++ b/substrate/frame/revive/uapi/src/host.rs @@ -14,8 +14,8 @@ use crate::{CallFlags, Result, ReturnFlags, StorageFlags}; use paste::paste; -#[cfg(target_arch = "riscv32")] -mod riscv32; +#[cfg(target_arch = "riscv64")] +mod riscv64; macro_rules! hash_fn { ( $name:ident, $bytes:literal ) => { @@ -45,17 +45,6 @@ pub trait HostFn: private::Sealed { /// - `output`: A reference to the output data buffer to write the address. fn address(output: &mut [u8; 20]); - /// Lock a new delegate dependency to the contract. - /// - /// Traps if the maximum number of delegate_dependencies is reached or if - /// the delegate dependency already exists. - /// - /// # Parameters - /// - /// - `code_hash`: The code hash of the dependency. 
Should be decodable as an `T::Hash`. Traps - /// otherwise. - fn lock_delegate_dependency(code_hash: &[u8; 32]); - /// Get the contract immutable data. /// /// Traps if: @@ -98,20 +87,12 @@ pub trait HostFn: private::Sealed { /// Returns the [EIP-155](https://eips.ethereum.org/EIPS/eip-155) chain ID. fn chain_id(output: &mut [u8; 32]); - /// Stores the current block number of the current contract into the supplied buffer. - /// - /// # Parameters - /// - /// - `output`: A reference to the output data buffer to write the block number. - fn block_number(output: &mut [u8; 32]); - - /// Stores the block hash of the given block number into the supplied buffer. + /// Stores the call data size as little endian U256 value into the supplied buffer. /// /// # Parameters /// - /// - `block_number`: A reference to the block number buffer. - /// - `output`: A reference to the output data buffer to write the block number. - fn block_hash(block_number: &[u8; 32], output: &mut [u8; 32]); + /// - `output`: A reference to the output data buffer to write the call data size. + fn call_data_size(output: &mut [u8; 32]); /// Call (possibly transferring some amount of funds) into the specified account. /// @@ -138,7 +119,7 @@ pub trait HostFn: private::Sealed { /// - [CalleeReverted][`crate::ReturnErrorCode::CalleeReverted]: Output buffer is returned. /// - [CalleeTrapped][`crate::ReturnErrorCode::CalleeTrapped] /// - [TransferFailed][`crate::ReturnErrorCode::TransferFailed] - /// - [NotCallable][`crate::ReturnErrorCode::NotCallable] + /// - [OutOfResources][`crate::ReturnErrorCode::OutOfResources] fn call( flags: CallFlags, callee: &[u8; 20], @@ -150,56 +131,6 @@ pub trait HostFn: private::Sealed { output: Option<&mut &mut [u8]>, ) -> Result; - /// Call into the chain extension provided by the chain if any. - /// - /// Handling of the input values is up to the specific chain extension and so is the - /// return value. The extension can decide to use the inputs as primitive inputs or as - /// in/out arguments by interpreting them as pointers. Any caller of this function - /// must therefore coordinate with the chain that it targets. - /// - /// # Note - /// - /// If no chain extension exists the contract will trap with the `NoChainExtension` - /// module error. - /// - /// # Parameters - /// - /// - `func_id`: The function id of the chain extension. - /// - `input`: The input data buffer. - /// - `output`: A reference to the output data buffer to write the call output buffer. If `None` - /// is provided then the output buffer is not copied. - /// - /// # Return - /// - /// The chain extension returned value, if executed successfully. - fn call_chain_extension(func_id: u32, input: &[u8], output: Option<&mut &mut [u8]>) -> u32; - - /// Call some dispatchable of the runtime. - /// - /// # Parameters - /// - /// - `call`: The call data. - /// - /// # Return - /// - /// Returns `Error::Success` when the dispatchable was successfully executed and - /// returned `Ok`. When the dispatchable was executed but returned an error - /// `Error::CallRuntimeFailed` is returned. The full error is not - /// provided because it is not guaranteed to be stable. - /// - /// # Comparison with `ChainExtension` - /// - /// Just as a chain extension this API allows the runtime to extend the functionality - /// of contracts. While making use of this function is generally easier it cannot be - /// used in all cases. Consider writing a chain extension if you need to do perform - /// one of the following tasks: - /// - /// - Return data. 
- /// - Provide functionality **exclusively** to contracts. - /// - Provide custom weights. - /// - Avoid the need to keep the `Call` data structure stable. - fn call_runtime(call: &[u8]) -> Result; - /// Stores the address of the caller into the supplied buffer. /// /// If this is a top-level call (i.e. initiated by an extrinsic) the origin address of the @@ -225,38 +156,6 @@ pub trait HostFn: private::Sealed { /// - `output`: A reference to the output data buffer to write the origin's address. fn origin(output: &mut [u8; 20]); - /// Checks whether the caller of the current contract is the origin of the whole call stack. - /// - /// Prefer this over [`is_contract()`][`Self::is_contract`] when checking whether your contract - /// is being called by a contract or a plain account. The reason is that it performs better - /// since it does not need to do any storage lookups. - /// - /// # Return - /// - /// A return value of `true` indicates that this contract is being called by a plain account - /// and `false` indicates that the caller is another contract. - fn caller_is_origin() -> bool; - - /// Checks whether the caller of the current contract is root. - /// - /// Note that only the origin of the call stack can be root. Hence this function returning - /// `true` implies that the contract is being called by the origin. - /// - /// A return value of `true` indicates that this contract is being called by a root origin, - /// and `false` indicates that the caller is a signed origin. - fn caller_is_root() -> u32; - - /// Clear the value at the given key in the contract storage. - /// - /// # Parameters - /// - /// - `key`: The storage key. - /// - /// # Return - /// - /// Returns the size of the pre-existing value at the specified key if any. - fn clear_storage(flags: StorageFlags, key: &[u8]) -> Option; - /// Retrieve the code hash for a specified contract address. /// /// # Parameters @@ -283,37 +182,6 @@ pub trait HostFn: private::Sealed { /// If `addr` is not a contract the `output` will be zero. fn code_size(addr: &[u8; 20], output: &mut [u8; 32]); - /// Checks whether there is a value stored under the given key. - /// - /// The key length must not exceed the maximum defined by the contracts module parameter. - /// - /// # Parameters - /// - `key`: The storage key. - /// - /// # Return - /// - /// Returns the size of the pre-existing value at the specified key if any. - fn contains_storage(flags: StorageFlags, key: &[u8]) -> Option; - - /// Emit a custom debug message. - /// - /// No newlines are added to the supplied message. - /// Specifying invalid UTF-8 just drops the message with no trap. - /// - /// This is a no-op if debug message recording is disabled which is always the case - /// when the code is executing on-chain. The message is interpreted as UTF-8 and - /// appended to the debug buffer which is then supplied to the calling RPC client. - /// - /// # Note - /// - /// Even though no action is taken when debug message recording is disabled there is still - /// a non trivial overhead (and weight cost) associated with calling this function. Contract - /// languages should remove calls to this function (either at runtime or compile time) when - /// not being executed as an RPC. For example, they could allow users to disable logging - /// through compile time flags (cargo features) for on-chain deployment. Additionally, the - /// return value of this function can be cached in order to prevent further calls at runtime. 
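> **Editor's note (not part of the patch):** the `debug_message` note above (relocated behind the `unstable-api` gate further down) recommends that contract languages compile logging out for on-chain builds. A sketch of that pattern; the `debug` feature name is hypothetical, and the `HostFn`/`HostFnImpl` import mirrors the style used by the revive fixture contracts:

```rust
// Sketch only; assumes the contract crate defines its own `debug`
// cargo feature and builds uapi with `unstable-api` enabled.
use uapi::{HostFn, HostFnImpl as api};

#[cfg(feature = "debug")]
fn log(msg: &str) {
    // The host drops invalid UTF-8 without trapping; a &str is
    // always valid UTF-8 anyway, so the Result can be ignored.
    let _ = api::debug_message(msg.as_bytes());
}

#[cfg(not(feature = "debug"))]
#[inline(always)]
fn log(_msg: &str) {} // compiled out for on-chain deployment
```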
- fn debug_message(str: &[u8]) -> Result; - /// Execute code in the context (storage, caller, value) of the current contract. /// /// Reentrancy protection is always disabled since the callee is allowed @@ -341,7 +209,7 @@ pub trait HostFn: private::Sealed { /// /// - [CalleeReverted][`crate::ReturnErrorCode::CalleeReverted]: Output buffer is returned. /// - [CalleeTrapped][`crate::ReturnErrorCode::CalleeTrapped] - /// - [CodeNotFound][`crate::ReturnErrorCode::CodeNotFound] + /// - [OutOfResources][`crate::ReturnErrorCode::OutOfResources] fn delegate_call( flags: CallFlags, address: &[u8; 20], @@ -362,49 +230,6 @@ pub trait HostFn: private::Sealed { /// - `topics`: The topics list. It can't contain duplicates. fn deposit_event(topics: &[[u8; 32]], data: &[u8]); - /// Recovers the ECDSA public key from the given message hash and signature. - /// - /// Writes the public key into the given output buffer. - /// Assumes the secp256k1 curve. - /// - /// # Parameters - /// - /// - `signature`: The signature bytes. - /// - `message_hash`: The message hash bytes. - /// - `output`: A reference to the output data buffer to write the public key. - /// - /// # Errors - /// - /// - [EcdsaRecoveryFailed][`crate::ReturnErrorCode::EcdsaRecoveryFailed] - fn ecdsa_recover( - signature: &[u8; 65], - message_hash: &[u8; 32], - output: &mut [u8; 33], - ) -> Result; - - /// Calculates Ethereum address from the ECDSA compressed public key and stores - /// it into the supplied buffer. - /// - /// # Parameters - /// - /// - `pubkey`: The public key bytes. - /// - `output`: A reference to the output data buffer to write the address. - /// - /// # Errors - /// - /// - [EcdsaRecoveryFailed][`crate::ReturnErrorCode::EcdsaRecoveryFailed] - fn ecdsa_to_eth_address(pubkey: &[u8; 33], output: &mut [u8; 20]) -> Result; - - /// Stores the amount of weight left into the supplied buffer. - /// The data is encoded as Weight. - /// - /// If the available space in `output` is less than the size of the value a trap is triggered. - /// - /// # Parameters - /// - /// - `output`: A reference to the output data buffer to write the weight left. - fn weight_left(output: &mut &mut [u8]); - /// Retrieve the value under the given key from storage. /// /// The key length must not exceed the maximum defined by the contracts module parameter. @@ -418,10 +243,7 @@ pub trait HostFn: private::Sealed { /// [KeyNotFound][`crate::ReturnErrorCode::KeyNotFound] fn get_storage(flags: StorageFlags, key: &[u8], output: &mut &mut [u8]) -> Result; - hash_fn!(sha2_256, 32); hash_fn!(keccak_256, 32); - hash_fn!(blake2_256, 32); - hash_fn!(blake2_128, 16); /// Stores the input passed by the caller into the supplied buffer. /// @@ -436,6 +258,21 @@ pub trait HostFn: private::Sealed { /// - `output`: A reference to the output data buffer to write the input data. fn input(output: &mut &mut [u8]); + /// Stores the U256 value at given `offset` from the input passed by the caller + /// into the supplied buffer. + /// + /// # Note + /// - If `offset` is out of bounds, a value of zero will be returned. + /// - If `offset` is in bounds but there is not enough call data, the available data + /// is right-padded in order to fill a whole U256 value. + /// - The data written to `output` is a little endian U256 integer value. + /// + /// # Parameters + /// + /// - `output`: A reference to the fixed output data buffer to write the value. + /// - `offset`: The offset (index) into the call data. 
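> **Editor's note (not part of the patch):** to make the `call_data_load` bounds and padding rules above concrete, here is an illustrative byte-level model. It is not the pallet's implementation and it ignores how the host fixes the byte order of the resulting little-endian U256:

```rust
// Illustration of the documented rules: out-of-bounds offsets yield
// all zeroes; in-bounds but short call data is right-padded to 32 bytes.
fn load_call_data_word(call_data: &[u8], offset: u32) -> [u8; 32] {
    let mut out = [0u8; 32];
    if let Some(available) = call_data.get(offset as usize..) {
        // Copy whatever exists; the remainder stays zero
        // (the "right-padded" case).
        let n = available.len().min(32);
        out[..n].copy_from_slice(&available[..n]);
    }
    // `None` (offset past the end): `out` stays all-zero, as documented.
    out
}
```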
+ fn call_data_load(output: &mut [u8; 32], offset: u32); + /// Instantiate a contract with the specified code hash. /// /// This function creates an account and executes the constructor defined in the code specified @@ -468,7 +305,7 @@ pub trait HostFn: private::Sealed { /// - [CalleeReverted][`crate::ReturnErrorCode::CalleeReverted]: Output buffer is returned. /// - [CalleeTrapped][`crate::ReturnErrorCode::CalleeTrapped] /// - [TransferFailed][`crate::ReturnErrorCode::TransferFailed] - /// - [CodeNotFound][`crate::ReturnErrorCode::CodeNotFound] + /// - [OutOfResources][`crate::ReturnErrorCode::OutOfResources] fn instantiate( code_hash: &[u8; 32], ref_time_limit: u64, @@ -481,65 +318,294 @@ pub trait HostFn: private::Sealed { salt: Option<&[u8; 32]>, ) -> Result; - /// Checks whether a specified address belongs to a contract. + /// Load the latest block timestamp into the supplied buffer /// /// # Parameters /// - /// - `address`: The address to check + /// - `output`: A reference to the output data buffer to write the timestamp. + fn now(output: &mut [u8; 32]); + + /// Cease contract execution and save a data buffer as a result of the execution. + /// + /// This function never returns as it stops execution of the caller. + /// This is the only way to return a data buffer to the caller. Returning from + /// execution without calling this function is equivalent to calling: + /// ```nocompile + /// return_value(ReturnFlags::empty(), &[]) + /// ``` + /// + /// Using an unnamed non empty `ReturnFlags` triggers a trap. + /// + /// # Parameters + /// + /// - `flags`: Flag used to signal special return conditions to the supervisor. See + /// [`ReturnFlags`] for a documentation of the supported flags. + /// - `return_value`: The return value buffer. + fn return_value(flags: ReturnFlags, return_value: &[u8]) -> !; + + /// Set the value at the given key in the contract storage. + /// + /// The key and value lengths must not exceed the maximums defined by the contracts module + /// parameters. + /// + /// # Parameters + /// + /// - `key`: The storage key. + /// - `encoded_value`: The storage value. /// /// # Return /// - /// Returns `true` if the address belongs to a contract. - fn is_contract(address: &[u8; 20]) -> bool; + /// Returns the size of the pre-existing value at the specified key if any. + fn set_storage(flags: StorageFlags, key: &[u8], value: &[u8]) -> Option; - /// Stores the minimum balance (a.k.a. existential deposit) into the supplied buffer. + /// Stores the value transferred along with this call/instantiate into the supplied buffer. /// /// # Parameters /// - /// - `output`: A reference to the output data buffer to write the minimum balance. - fn minimum_balance(output: &mut [u8; 32]); + /// - `output`: A reference to the output data buffer to write the transferred value. + fn value_transferred(output: &mut [u8; 32]); - /// Retrieve the code hash of the currently executing contract. + /// Stores the price for the specified amount of gas into the supplied buffer. /// /// # Parameters /// - /// - `output`: A reference to the output data buffer to write the code hash. - fn own_code_hash(output: &mut [u8; 32]); + /// - `ref_time_limit`: The *ref_time* Weight limit to query the price for. + /// - `proof_size_limit`: The *proof_size* Weight limit to query the price for. + /// - `output`: A reference to the output data buffer to write the price. 
+ fn weight_to_fee(ref_time_limit: u64, proof_size_limit: u64, output: &mut [u8; 32]); - /// Load the latest block timestamp into the supplied buffer + /// Stores the size of the returned data of the last contract call or instantiation. /// /// # Parameters /// - /// - `output`: A reference to the output data buffer to write the timestamp. - fn now(output: &mut [u8; 32]); + /// - `output`: A reference to the output buffer to write the size. + fn return_data_size(output: &mut [u8; 32]); - /// Removes the delegate dependency from the contract. + /// Stores the returned data of the last contract call or contract instantiation. /// - /// Traps if the delegate dependency does not exist. + /// # Parameters + /// - `output`: A reference to the output buffer to write the data. + /// - `offset`: Byte offset into the returned data + fn return_data_copy(output: &mut &mut [u8], offset: u32); + + /// Stores the current block number of the current contract into the supplied buffer. + /// + /// # Parameters + /// + /// - `output`: A reference to the output data buffer to write the block number. + #[cfg(feature = "unstable-api")] + fn block_number(output: &mut [u8; 32]); + + /// Stores the block hash of the given block number into the supplied buffer. + /// + /// # Parameters + /// + /// - `block_number`: A reference to the block number buffer. + /// - `output`: A reference to the output data buffer to write the block number. + #[cfg(feature = "unstable-api")] + fn block_hash(block_number: &[u8; 32], output: &mut [u8; 32]); + + /// Call into the chain extension provided by the chain if any. + /// + /// Handling of the input values is up to the specific chain extension and so is the + /// return value. The extension can decide to use the inputs as primitive inputs or as + /// in/out arguments by interpreting them as pointers. Any caller of this function + /// must therefore coordinate with the chain that it targets. + /// + /// # Note + /// + /// If no chain extension exists the contract will trap with the `NoChainExtension` + /// module error. + /// + /// # Parameters + /// + /// - `func_id`: The function id of the chain extension. + /// - `input`: The input data buffer. + /// - `output`: A reference to the output data buffer to write the call output buffer. If `None` + /// is provided then the output buffer is not copied. + /// + /// # Return + /// + /// The chain extension returned value, if executed successfully. + #[cfg(feature = "unstable-api")] + fn call_chain_extension(func_id: u32, input: &[u8], output: Option<&mut &mut [u8]>) -> u32; + + /// Call some dispatchable of the runtime. + /// + /// # Parameters + /// + /// - `call`: The call data. + /// + /// # Return + /// + /// Returns `Error::Success` when the dispatchable was successfully executed and + /// returned `Ok`. When the dispatchable was executed but returned an error + /// `Error::CallRuntimeFailed` is returned. The full error is not + /// provided because it is not guaranteed to be stable. + /// + /// # Comparison with `ChainExtension` + /// + /// Just as a chain extension this API allows the runtime to extend the functionality + /// of contracts. While making use of this function is generally easier it cannot be + /// used in all cases. Consider writing a chain extension if you need to do perform + /// one of the following tasks: + /// + /// - Return data. + /// - Provide functionality **exclusively** to contracts. + /// - Provide custom weights. + /// - Avoid the need to keep the `Call` data structure stable. 
+ #[cfg(feature = "unstable-api")] + fn call_runtime(call: &[u8]) -> Result; + + /// Checks whether the caller of the current contract is the origin of the whole call stack. + /// + /// Prefer this over [`is_contract()`][`Self::is_contract`] when checking whether your contract + /// is being called by a contract or a plain account. The reason is that it performs better + /// since it does not need to do any storage lookups. + /// + /// # Return + /// + /// A return value of `true` indicates that this contract is being called by a plain account + /// and `false` indicates that the caller is another contract. + #[cfg(feature = "unstable-api")] + fn caller_is_origin() -> bool; + + /// Checks whether the caller of the current contract is root. + /// + /// Note that only the origin of the call stack can be root. Hence this function returning + /// `true` implies that the contract is being called by the origin. + /// + /// A return value of `true` indicates that this contract is being called by a root origin, + /// and `false` indicates that the caller is a signed origin. + #[cfg(feature = "unstable-api")] + fn caller_is_root() -> u32; + + /// Clear the value at the given key in the contract storage. + /// + /// # Parameters + /// + /// - `key`: The storage key. + /// + /// # Return + /// + /// Returns the size of the pre-existing value at the specified key if any. + #[cfg(feature = "unstable-api")] + fn clear_storage(flags: StorageFlags, key: &[u8]) -> Option; + + /// Checks whether there is a value stored under the given key. + /// + /// The key length must not exceed the maximum defined by the contracts module parameter. + /// + /// # Parameters + /// - `key`: The storage key. + /// + /// # Return + /// + /// Returns the size of the pre-existing value at the specified key if any. + #[cfg(feature = "unstable-api")] + fn contains_storage(flags: StorageFlags, key: &[u8]) -> Option; + + /// Emit a custom debug message. + /// + /// No newlines are added to the supplied message. + /// Specifying invalid UTF-8 just drops the message with no trap. + /// + /// This is a no-op if debug message recording is disabled which is always the case + /// when the code is executing on-chain. The message is interpreted as UTF-8 and + /// appended to the debug buffer which is then supplied to the calling RPC client. + /// + /// # Note + /// + /// Even though no action is taken when debug message recording is disabled there is still + /// a non trivial overhead (and weight cost) associated with calling this function. Contract + /// languages should remove calls to this function (either at runtime or compile time) when + /// not being executed as an RPC. For example, they could allow users to disable logging + /// through compile time flags (cargo features) for on-chain deployment. Additionally, the + /// return value of this function can be cached in order to prevent further calls at runtime. + #[cfg(feature = "unstable-api")] + fn debug_message(str: &[u8]) -> Result; + + /// Recovers the ECDSA public key from the given message hash and signature. + /// + /// Writes the public key into the given output buffer. + /// Assumes the secp256k1 curve. + /// + /// # Parameters + /// + /// - `signature`: The signature bytes. + /// - `message_hash`: The message hash bytes. + /// - `output`: A reference to the output data buffer to write the public key. 
+ /// + /// # Errors + /// + /// - [EcdsaRecoveryFailed][`crate::ReturnErrorCode::EcdsaRecoveryFailed] + #[cfg(feature = "unstable-api")] + fn ecdsa_recover( + signature: &[u8; 65], + message_hash: &[u8; 32], + output: &mut [u8; 33], + ) -> Result; + + /// Calculates Ethereum address from the ECDSA compressed public key and stores + /// it into the supplied buffer. + /// + /// # Parameters + /// + /// - `pubkey`: The public key bytes. + /// - `output`: A reference to the output data buffer to write the address. + /// + /// # Errors + /// + /// - [EcdsaRecoveryFailed][`crate::ReturnErrorCode::EcdsaRecoveryFailed] + #[cfg(feature = "unstable-api")] + fn ecdsa_to_eth_address(pubkey: &[u8; 33], output: &mut [u8; 20]) -> Result; + + #[cfg(feature = "unstable-api")] + hash_fn!(sha2_256, 32); + #[cfg(feature = "unstable-api")] + hash_fn!(blake2_256, 32); + #[cfg(feature = "unstable-api")] + hash_fn!(blake2_128, 16); + + /// Checks whether a specified address belongs to a contract. + /// + /// # Parameters + /// + /// - `address`: The address to check + /// + /// # Return + /// + /// Returns `true` if the address belongs to a contract. + #[cfg(feature = "unstable-api")] + fn is_contract(address: &[u8; 20]) -> bool; + + /// Lock a new delegate dependency to the contract. + /// + /// Traps if the maximum number of delegate_dependencies is reached or if + /// the delegate dependency already exists. /// /// # Parameters /// /// - `code_hash`: The code hash of the dependency. Should be decodable as an `T::Hash`. Traps /// otherwise. - fn unlock_delegate_dependency(code_hash: &[u8; 32]); + #[cfg(feature = "unstable-api")] + fn lock_delegate_dependency(code_hash: &[u8; 32]); - /// Cease contract execution and save a data buffer as a result of the execution. + /// Stores the minimum balance (a.k.a. existential deposit) into the supplied buffer. /// - /// This function never returns as it stops execution of the caller. - /// This is the only way to return a data buffer to the caller. Returning from - /// execution without calling this function is equivalent to calling: - /// ```nocompile - /// return_value(ReturnFlags::empty(), &[]) - /// ``` + /// # Parameters /// - /// Using an unnamed non empty `ReturnFlags` triggers a trap. + /// - `output`: A reference to the output data buffer to write the minimum balance. + #[cfg(feature = "unstable-api")] + fn minimum_balance(output: &mut [u8; 32]); + + /// Retrieve the code hash of the currently executing contract. /// /// # Parameters /// - /// - `flags`: Flag used to signal special return conditions to the supervisor. See - /// [`ReturnFlags`] for a documentation of the supported flags. - /// - `return_value`: The return value buffer. - fn return_value(flags: ReturnFlags, return_value: &[u8]) -> !; + /// - `output`: A reference to the output data buffer to write the code hash. + #[cfg(feature = "unstable-api")] + fn own_code_hash(output: &mut [u8; 32]); /// Replace the contract code at the specified address with new code. /// @@ -566,25 +632,11 @@ pub trait HostFn: private::Sealed { /// - `code_hash`: The hash of the new code. Should be decodable as an `T::Hash`. Traps /// otherwise. /// - /// # Errors - /// - /// - [CodeNotFound][`crate::ReturnErrorCode::CodeNotFound] - fn set_code_hash(code_hash: &[u8; 32]) -> Result; - - /// Set the value at the given key in the contract storage. - /// - /// The key and value lengths must not exceed the maximums defined by the contracts module - /// parameters. - /// - /// # Parameters - /// - /// - `key`: The storage key. 
- /// - `encoded_value`: The storage value. - /// - /// # Return + /// # Panics /// - /// Returns the size of the pre-existing value at the specified key if any. - fn set_storage(flags: StorageFlags, key: &[u8], value: &[u8]) -> Option; + /// Panics if there is no code on-chain with the specified hash. + #[cfg(feature = "unstable-api")] + fn set_code_hash(code_hash: &[u8; 32]); /// Verify a sr25519 signature /// @@ -596,6 +648,7 @@ pub trait HostFn: private::Sealed { /// # Errors /// /// - [Sr25519VerifyFailed][`crate::ReturnErrorCode::Sr25519VerifyFailed] + #[cfg(feature = "unstable-api")] fn sr25519_verify(signature: &[u8; 64], message: &[u8], pub_key: &[u8; 32]) -> Result; /// Retrieve and remove the value under the given key from storage. @@ -607,6 +660,7 @@ pub trait HostFn: private::Sealed { /// # Errors /// /// [KeyNotFound][`crate::ReturnErrorCode::KeyNotFound] + #[cfg(feature = "unstable-api")] fn take_storage(flags: StorageFlags, key: &[u8], output: &mut &mut [u8]) -> Result; /// Remove the calling account and transfer remaining **free** balance. @@ -624,23 +678,30 @@ pub trait HostFn: private::Sealed { /// - The contract is live i.e is already on the call stack. /// - Failed to send the balance to the beneficiary. /// - The deletion queue is full. + #[cfg(feature = "unstable-api")] fn terminate(beneficiary: &[u8; 20]) -> !; - /// Stores the value transferred along with this call/instantiate into the supplied buffer. + /// Removes the delegate dependency from the contract. + /// + /// Traps if the delegate dependency does not exist. /// /// # Parameters /// - /// - `output`: A reference to the output data buffer to write the transferred value. - fn value_transferred(output: &mut [u8; 32]); + /// - `code_hash`: The code hash of the dependency. Should be decodable as an `T::Hash`. Traps + /// otherwise. + #[cfg(feature = "unstable-api")] + fn unlock_delegate_dependency(code_hash: &[u8; 32]); - /// Stores the price for the specified amount of gas into the supplied buffer. + /// Stores the amount of weight left into the supplied buffer. + /// The data is encoded as Weight. + /// + /// If the available space in `output` is less than the size of the value a trap is triggered. /// /// # Parameters /// - /// - `ref_time_limit`: The *ref_time* Weight limit to query the price for. - /// - `proof_size_limit`: The *proof_size* Weight limit to query the price for. - /// - `output`: A reference to the output data buffer to write the price. - fn weight_to_fee(ref_time_limit: u64, proof_size_limit: u64, output: &mut [u8; 32]); + /// - `output`: A reference to the output data buffer to write the weight left. + #[cfg(feature = "unstable-api")] + fn weight_left(output: &mut &mut [u8]); /// Execute an XCM program locally, using the contract's address as the origin. /// This is equivalent to dispatching `pallet_xcm::execute` through call_runtime, except that @@ -656,6 +717,7 @@ pub trait HostFn: private::Sealed { /// /// Returns `Error::Success` when the XCM execution attempt is successful. When the XCM /// execution fails, `ReturnCode::XcmExecutionFailed` is returned + #[cfg(feature = "unstable-api")] fn xcm_execute(msg: &[u8]) -> Result; /// Send an XCM program from the contract to the specified destination. @@ -673,21 +735,8 @@ pub trait HostFn: private::Sealed { /// /// Returns `ReturnCode::Success` when the message was successfully sent. When the XCM /// execution fails, `ReturnErrorCode::XcmSendFailed` is returned. 
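> **Editor's note (not part of the patch):** the declarations that follow are re-added behind the new `unstable-api` cargo feature introduced in the `Cargo.toml` hunk earlier in this diff. A sketch of what opting in looks like from a contract's perspective; the body only mirrors the trait signature shown here:

```rust
// Sketch: compiles only when the contract is built with
// `--features unstable-api` and the uapi dependency enables it too.
use uapi::{HostFn, HostFnImpl as api};

#[cfg(feature = "unstable-api")]
fn current_block_number() -> [u8; 32] {
    let mut out = [0u8; 32];
    // `block_number` is one of the host functions gated by this patch.
    api::block_number(&mut out);
    out
}
```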
+ #[cfg(feature = "unstable-api")] fn xcm_send(dest: &[u8], msg: &[u8], output: &mut [u8; 32]) -> Result; - - /// Stores the size of the returned data of the last contract call or instantiation. - /// - /// # Parameters - /// - /// - `output`: A reference to the output buffer to write the size. - fn return_data_size(output: &mut [u8; 32]); - - /// Stores the returned data of the last contract call or contract instantiation. - /// - /// # Parameters - /// - `output`: A reference to the output buffer to write the data. - /// - `offset`: Byte offset into the returned data - fn return_data_copy(output: &mut &mut [u8], offset: u32); } mod private { diff --git a/substrate/frame/revive/uapi/src/host/riscv32.rs b/substrate/frame/revive/uapi/src/host/riscv64.rs similarity index 85% rename from substrate/frame/revive/uapi/src/host/riscv32.rs rename to substrate/frame/revive/uapi/src/host/riscv64.rs index e8b27057ed18..d5a695262a24 100644 --- a/substrate/frame/revive/uapi/src/host/riscv32.rs +++ b/substrate/frame/revive/uapi/src/host/riscv64.rs @@ -26,10 +26,10 @@ mod sys { mod abi {} impl abi::FromHost for ReturnCode { - type Regs = (u32,); + type Regs = (u64,); fn from_host((a0,): Self::Regs) -> Self { - ReturnCode(a0) + ReturnCode(a0 as _) } } @@ -63,6 +63,7 @@ mod sys { pub fn instantiate(ptr: *const u8) -> ReturnCode; pub fn terminate(beneficiary_ptr: *const u8); pub fn input(out_ptr: *mut u8, out_len_ptr: *mut u32); + pub fn call_data_load(out_ptr: *mut u8, offset: u32); pub fn seal_return(flags: u32, data_ptr: *const u8, data_len: u32); pub fn caller(out_ptr: *mut u8); pub fn origin(out_ptr: *mut u8); @@ -89,6 +90,7 @@ mod sys { data_ptr: *const u8, data_len: u32, ); + pub fn call_data_size(out_ptr: *mut u8); pub fn block_number(out_ptr: *mut u8); pub fn block_hash(block_number_ptr: *const u8, out_ptr: *mut u8); pub fn hash_sha2_256(input_ptr: *const u8, input_len: u32, out_ptr: *mut u8); @@ -115,7 +117,7 @@ mod sys { message_len: u32, message_ptr: *const u8, ) -> ReturnCode; - pub fn set_code_hash(code_hash_ptr: *const u8) -> ReturnCode; + pub fn set_code_hash(code_hash_ptr: *const u8); pub fn ecdsa_to_eth_address(key_ptr: *const u8, out_ptr: *mut u8) -> ReturnCode; pub fn instantiation_nonce() -> u64; pub fn lock_delegate_dependency(code_hash_ptr: *const u8); @@ -207,33 +209,33 @@ impl HostFn for HostFnImpl { let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output); let deposit_limit_ptr = ptr_or_sentinel(&deposit_limit); let salt_ptr = ptr_or_sentinel(&salt); - #[repr(packed)] + #[repr(C)] #[allow(dead_code)] struct Args { - code_hash: *const u8, + code_hash: u32, ref_time_limit: u64, proof_size_limit: u64, - deposit_limit: *const u8, - value: *const u8, - input: *const u8, + deposit_limit: u32, + value: u32, + input: u32, input_len: u32, - address: *const u8, - output: *mut u8, - output_len: *mut u32, - salt: *const u8, + address: u32, + output: u32, + output_len: u32, + salt: u32, } let args = Args { - code_hash: code_hash.as_ptr(), + code_hash: code_hash.as_ptr() as _, ref_time_limit, proof_size_limit, - deposit_limit: deposit_limit_ptr, - value: value.as_ptr(), - input: input.as_ptr(), + deposit_limit: deposit_limit_ptr as _, + value: value.as_ptr() as _, + input: input.as_ptr() as _, input_len: input.len() as _, - address, - output: output_ptr, - output_len: &mut output_len as *mut _, - salt: salt_ptr, + address: address as _, + output: output_ptr as _, + output_len: &mut output_len as *mut _ as _, + salt: salt_ptr as _, }; let ret_code = { unsafe { sys::instantiate(&args as 
*const Args as *const _) } }; @@ -257,31 +259,31 @@ impl HostFn for HostFnImpl { ) -> Result { let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output); let deposit_limit_ptr = ptr_or_sentinel(&deposit_limit); - #[repr(packed)] + #[repr(C)] #[allow(dead_code)] struct Args { flags: u32, - callee: *const u8, + callee: u32, ref_time_limit: u64, proof_size_limit: u64, - deposit_limit: *const u8, - value: *const u8, - input: *const u8, + deposit_limit: u32, + value: u32, + input: u32, input_len: u32, - output: *mut u8, - output_len: *mut u32, + output: u32, + output_len: u32, } let args = Args { flags: flags.bits(), - callee: callee.as_ptr(), + callee: callee.as_ptr() as _, ref_time_limit, proof_size_limit, - deposit_limit: deposit_limit_ptr, - value: value.as_ptr(), - input: input.as_ptr(), + deposit_limit: deposit_limit_ptr as _, + value: value.as_ptr() as _, + input: input.as_ptr() as _, input_len: input.len() as _, - output: output_ptr, - output_len: &mut output_len as *mut _, + output: output_ptr as _, + output_len: &mut output_len as *mut _ as _, }; let ret_code = { unsafe { sys::call(&args as *const Args as *const _) } }; @@ -293,10 +295,6 @@ impl HostFn for HostFnImpl { ret_code.into() } - fn caller_is_root() -> u32 { - unsafe { sys::caller_is_root() }.into_u32() - } - fn delegate_call( flags: CallFlags, address: &[u8; 20], @@ -308,29 +306,29 @@ impl HostFn for HostFnImpl { ) -> Result { let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output); let deposit_limit_ptr = ptr_or_sentinel(&deposit_limit); - #[repr(packed)] + #[repr(C)] #[allow(dead_code)] struct Args { flags: u32, - address: *const u8, + address: u32, ref_time_limit: u64, proof_size_limit: u64, - deposit_limit: *const u8, - input: *const u8, + deposit_limit: u32, + input: u32, input_len: u32, - output: *mut u8, - output_len: *mut u32, + output: u32, + output_len: u32, } let args = Args { flags: flags.bits(), - address: address.as_ptr(), + address: address.as_ptr() as _, ref_time_limit, proof_size_limit, - deposit_limit: deposit_limit_ptr, - input: input.as_ptr(), + deposit_limit: deposit_limit_ptr as _, + input: input.as_ptr() as _, input_len: input.len() as _, - output: output_ptr, - output_len: &mut output_len as *mut _, + output: output_ptr as _, + output_len: &mut output_len as *mut _ as _, }; let ret_code = { unsafe { sys::delegate_call(&args as *const Args as *const _) } }; @@ -366,17 +364,6 @@ impl HostFn for HostFnImpl { ret_code.into() } - fn clear_storage(flags: StorageFlags, key: &[u8]) -> Option { - let ret_code = unsafe { sys::clear_storage(flags.bits(), key.as_ptr(), key.len() as u32) }; - ret_code.into() - } - - fn contains_storage(flags: StorageFlags, key: &[u8]) -> Option { - let ret_code = - unsafe { sys::contains_storage(flags.bits(), key.as_ptr(), key.len() as u32) }; - ret_code.into() - } - fn get_storage(flags: StorageFlags, key: &[u8], output: &mut &mut [u8]) -> Result { let mut output_len = output.len() as u32; let ret_code = { @@ -394,33 +381,79 @@ impl HostFn for HostFnImpl { ret_code.into() } - fn take_storage(flags: StorageFlags, key: &[u8], output: &mut &mut [u8]) -> Result { + fn input(output: &mut &mut [u8]) { let mut output_len = output.len() as u32; - let ret_code = { - unsafe { - sys::take_storage( - flags.bits(), - key.as_ptr(), - key.len() as u32, - output.as_mut_ptr(), - &mut output_len, - ) - } - }; + { + unsafe { sys::input(output.as_mut_ptr(), &mut output_len) }; + } extract_from_slice(output, output_len as usize); - ret_code.into() } - fn debug_message(str: 
&[u8]) -> Result { - let ret_code = unsafe { sys::debug_message(str.as_ptr(), str.len() as u32) }; - ret_code.into() + fn call_data_load(out_ptr: &mut [u8; 32], offset: u32) { + unsafe { sys::call_data_load(out_ptr.as_mut_ptr(), offset) }; } - fn terminate(beneficiary: &[u8; 20]) -> ! { - unsafe { sys::terminate(beneficiary.as_ptr()) } - panic!("terminate does not return"); + fn return_value(flags: ReturnFlags, return_value: &[u8]) -> ! { + unsafe { sys::seal_return(flags.bits(), return_value.as_ptr(), return_value.len() as u32) } + panic!("seal_return does not return"); + } + + impl_wrapper_for! { + [u8; 32] => call_data_size, balance, value_transferred, now, chain_id; + [u8; 20] => address, caller, origin; + } + + #[cfg(feature = "unstable-api")] + impl_wrapper_for! { + [u8; 32] => block_number, minimum_balance; + } + + fn weight_to_fee(ref_time_limit: u64, proof_size_limit: u64, output: &mut [u8; 32]) { + unsafe { sys::weight_to_fee(ref_time_limit, proof_size_limit, output.as_mut_ptr()) }; + } + + impl_hash_fn!(keccak_256, 32); + + fn get_immutable_data(output: &mut &mut [u8]) { + let mut output_len = output.len() as u32; + unsafe { sys::get_immutable_data(output.as_mut_ptr(), &mut output_len) }; + extract_from_slice(output, output_len as usize); + } + + fn set_immutable_data(data: &[u8]) { + unsafe { sys::set_immutable_data(data.as_ptr(), data.len() as u32) } + } + + fn balance_of(address: &[u8; 20], output: &mut [u8; 32]) { + unsafe { sys::balance_of(address.as_ptr(), output.as_mut_ptr()) }; + } + + fn code_hash(address: &[u8; 20], output: &mut [u8; 32]) { + unsafe { sys::code_hash(address.as_ptr(), output.as_mut_ptr()) } } + fn code_size(address: &[u8; 20], output: &mut [u8; 32]) { + unsafe { sys::code_size(address.as_ptr(), output.as_mut_ptr()) } + } + + fn return_data_size(output: &mut [u8; 32]) { + unsafe { sys::return_data_size(output.as_mut_ptr()) }; + } + + fn return_data_copy(output: &mut &mut [u8], offset: u32) { + let mut output_len = output.len() as u32; + { + unsafe { sys::return_data_copy(output.as_mut_ptr(), &mut output_len, offset) }; + } + extract_from_slice(output, output_len as usize); + } + + #[cfg(feature = "unstable-api")] + fn block_hash(block_number_ptr: &[u8; 32], output: &mut [u8; 32]) { + unsafe { sys::block_hash(block_number_ptr.as_ptr(), output.as_mut_ptr()) }; + } + + #[cfg(feature = "unstable-api")] fn call_chain_extension(func_id: u32, input: &[u8], mut output: Option<&mut &mut [u8]>) -> u32 { let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output); let ret_code = { @@ -441,44 +474,43 @@ impl HostFn for HostFnImpl { ret_code.into_u32() } - fn input(output: &mut &mut [u8]) { - let mut output_len = output.len() as u32; - { - unsafe { sys::input(output.as_mut_ptr(), &mut output_len) }; - } - extract_from_slice(output, output_len as usize); - } - - fn return_value(flags: ReturnFlags, return_value: &[u8]) -> ! { - unsafe { sys::seal_return(flags.bits(), return_value.as_ptr(), return_value.len() as u32) } - panic!("seal_return does not return"); - } - + #[cfg(feature = "unstable-api")] fn call_runtime(call: &[u8]) -> Result { let ret_code = unsafe { sys::call_runtime(call.as_ptr(), call.len() as u32) }; ret_code.into() } - impl_wrapper_for! 
{ - [u8; 32] => block_number, balance, value_transferred, now, minimum_balance, chain_id; - [u8; 20] => address, caller, origin; + #[cfg(feature = "unstable-api")] + fn caller_is_origin() -> bool { + let ret_val = unsafe { sys::caller_is_origin() }; + ret_val.into_bool() } - fn weight_left(output: &mut &mut [u8]) { - let mut output_len = output.len() as u32; - unsafe { sys::weight_left(output.as_mut_ptr(), &mut output_len) } - extract_from_slice(output, output_len as usize) + #[cfg(feature = "unstable-api")] + fn caller_is_root() -> u32 { + unsafe { sys::caller_is_root() }.into_u32() } - fn weight_to_fee(ref_time_limit: u64, proof_size_limit: u64, output: &mut [u8; 32]) { - unsafe { sys::weight_to_fee(ref_time_limit, proof_size_limit, output.as_mut_ptr()) }; + #[cfg(feature = "unstable-api")] + fn clear_storage(flags: StorageFlags, key: &[u8]) -> Option { + let ret_code = unsafe { sys::clear_storage(flags.bits(), key.as_ptr(), key.len() as u32) }; + ret_code.into() } - impl_hash_fn!(sha2_256, 32); - impl_hash_fn!(keccak_256, 32); - impl_hash_fn!(blake2_256, 32); - impl_hash_fn!(blake2_128, 16); + #[cfg(feature = "unstable-api")] + fn contains_storage(flags: StorageFlags, key: &[u8]) -> Option { + let ret_code = + unsafe { sys::contains_storage(flags.bits(), key.as_ptr(), key.len() as u32) }; + ret_code.into() + } + #[cfg(feature = "unstable-api")] + fn debug_message(str: &[u8]) -> Result { + let ret_code = unsafe { sys::debug_message(str.as_ptr(), str.len() as u32) }; + ret_code.into() + } + + #[cfg(feature = "unstable-api")] fn ecdsa_recover( signature: &[u8; 65], message_hash: &[u8; 32], @@ -490,77 +522,96 @@ impl HostFn for HostFnImpl { ret_code.into() } + #[cfg(feature = "unstable-api")] fn ecdsa_to_eth_address(pubkey: &[u8; 33], output: &mut [u8; 20]) -> Result { let ret_code = unsafe { sys::ecdsa_to_eth_address(pubkey.as_ptr(), output.as_mut_ptr()) }; ret_code.into() } - fn sr25519_verify(signature: &[u8; 64], message: &[u8], pub_key: &[u8; 32]) -> Result { - let ret_code = unsafe { - sys::sr25519_verify( - signature.as_ptr(), - pub_key.as_ptr(), - message.len() as u32, - message.as_ptr(), - ) - }; - ret_code.into() - } + #[cfg(feature = "unstable-api")] + impl_hash_fn!(sha2_256, 32); + #[cfg(feature = "unstable-api")] + impl_hash_fn!(blake2_256, 32); + #[cfg(feature = "unstable-api")] + impl_hash_fn!(blake2_128, 16); + #[cfg(feature = "unstable-api")] fn is_contract(address: &[u8; 20]) -> bool { let ret_val = unsafe { sys::is_contract(address.as_ptr()) }; ret_val.into_bool() } - fn get_immutable_data(output: &mut &mut [u8]) { - let mut output_len = output.len() as u32; - unsafe { sys::get_immutable_data(output.as_mut_ptr(), &mut output_len) }; - extract_from_slice(output, output_len as usize); - } - - fn set_immutable_data(data: &[u8]) { - unsafe { sys::set_immutable_data(data.as_ptr(), data.len() as u32) } - } - - fn balance_of(address: &[u8; 20], output: &mut [u8; 32]) { - unsafe { sys::balance_of(address.as_ptr(), output.as_mut_ptr()) }; - } - - fn caller_is_origin() -> bool { - let ret_val = unsafe { sys::caller_is_origin() }; - ret_val.into_bool() + #[cfg(feature = "unstable-api")] + fn lock_delegate_dependency(code_hash: &[u8; 32]) { + unsafe { sys::lock_delegate_dependency(code_hash.as_ptr()) } } - fn set_code_hash(code_hash: &[u8; 32]) -> Result { - let ret_val = unsafe { sys::set_code_hash(code_hash.as_ptr()) }; - ret_val.into() + #[cfg(feature = "unstable-api")] + fn own_code_hash(output: &mut [u8; 32]) { + unsafe { sys::own_code_hash(output.as_mut_ptr()) } } - fn 
code_hash(address: &[u8; 20], output: &mut [u8; 32]) { - unsafe { sys::code_hash(address.as_ptr(), output.as_mut_ptr()) } + #[cfg(feature = "unstable-api")] + fn set_code_hash(code_hash: &[u8; 32]) { + unsafe { sys::set_code_hash(code_hash.as_ptr()) } } - fn code_size(address: &[u8; 20], output: &mut [u8; 32]) { - unsafe { sys::code_size(address.as_ptr(), output.as_mut_ptr()) } + #[cfg(feature = "unstable-api")] + fn sr25519_verify(signature: &[u8; 64], message: &[u8], pub_key: &[u8; 32]) -> Result { + let ret_code = unsafe { + sys::sr25519_verify( + signature.as_ptr(), + pub_key.as_ptr(), + message.len() as u32, + message.as_ptr(), + ) + }; + ret_code.into() } - fn own_code_hash(output: &mut [u8; 32]) { - unsafe { sys::own_code_hash(output.as_mut_ptr()) } + #[cfg(feature = "unstable-api")] + fn take_storage(flags: StorageFlags, key: &[u8], output: &mut &mut [u8]) -> Result { + let mut output_len = output.len() as u32; + let ret_code = { + unsafe { + sys::take_storage( + flags.bits(), + key.as_ptr(), + key.len() as u32, + output.as_mut_ptr(), + &mut output_len, + ) + } + }; + extract_from_slice(output, output_len as usize); + ret_code.into() } - fn lock_delegate_dependency(code_hash: &[u8; 32]) { - unsafe { sys::lock_delegate_dependency(code_hash.as_ptr()) } + #[cfg(feature = "unstable-api")] + fn terminate(beneficiary: &[u8; 20]) -> ! { + unsafe { sys::terminate(beneficiary.as_ptr()) } + panic!("terminate does not return"); } + #[cfg(feature = "unstable-api")] fn unlock_delegate_dependency(code_hash: &[u8; 32]) { unsafe { sys::unlock_delegate_dependency(code_hash.as_ptr()) } } + #[cfg(feature = "unstable-api")] + fn weight_left(output: &mut &mut [u8]) { + let mut output_len = output.len() as u32; + unsafe { sys::weight_left(output.as_mut_ptr(), &mut output_len) } + extract_from_slice(output, output_len as usize) + } + + #[cfg(feature = "unstable-api")] fn xcm_execute(msg: &[u8]) -> Result { let ret_code = unsafe { sys::xcm_execute(msg.as_ptr(), msg.len() as _) }; ret_code.into() } + #[cfg(feature = "unstable-api")] fn xcm_send(dest: &[u8], msg: &[u8], output: &mut [u8; 32]) -> Result { let ret_code = unsafe { sys::xcm_send( @@ -573,20 +624,4 @@ impl HostFn for HostFnImpl { }; ret_code.into() } - - fn return_data_size(output: &mut [u8; 32]) { - unsafe { sys::return_data_size(output.as_mut_ptr()) }; - } - - fn return_data_copy(output: &mut &mut [u8], offset: u32) { - let mut output_len = output.len() as u32; - { - unsafe { sys::return_data_copy(output.as_mut_ptr(), &mut output_len, offset) }; - } - extract_from_slice(output, output_len as usize); - } - - fn block_hash(block_number_ptr: &[u8; 32], output: &mut [u8; 32]) { - unsafe { sys::block_hash(block_number_ptr.as_ptr(), output.as_mut_ptr()) }; - } } diff --git a/substrate/frame/revive/uapi/src/lib.rs b/substrate/frame/revive/uapi/src/lib.rs index e660ce36ef75..14a5e3f28889 100644 --- a/substrate/frame/revive/uapi/src/lib.rs +++ b/substrate/frame/revive/uapi/src/lib.rs @@ -65,6 +65,12 @@ impl From for u32 { } } +impl From for u64 { + fn from(error: ReturnErrorCode) -> Self { + u32::from(error).into() + } +} + define_error_codes! { /// The called function trapped and has its state changes reverted. /// In this case no output buffer is returned. @@ -79,23 +85,21 @@ define_error_codes! { /// Transfer failed for other not further specified reason. Most probably /// reserved or locked balance of the sender that was preventing the transfer. TransferFailed = 4, - /// No code could be found at the supplied code hash. 
- CodeNotFound = 5, - /// The account that was called is no contract. - NotCallable = 6, /// The call to `debug_message` had no effect because debug message /// recording was disabled. - LoggingDisabled = 7, + LoggingDisabled = 5, /// The call dispatched by `call_runtime` was executed but returned an error. - CallRuntimeFailed = 8, + CallRuntimeFailed = 6, /// ECDSA public key recovery failed. Most probably wrong recovery id or signature. - EcdsaRecoveryFailed = 9, + EcdsaRecoveryFailed = 7, /// sr25519 signature verification failed. - Sr25519VerifyFailed = 10, + Sr25519VerifyFailed = 8, /// The `xcm_execute` call failed. - XcmExecutionFailed = 11, + XcmExecutionFailed = 9, /// The `xcm_send` call failed. - XcmSendFailed = 12, + XcmSendFailed = 10, + /// The subcall ran out of weight or storage deposit. + OutOfResources = 11, } /// The raw return code returned by the host side. diff --git a/substrate/frame/safe-mode/src/mock.rs b/substrate/frame/safe-mode/src/mock.rs index ec1ad8249514..aaf3456272fa 100644 --- a/substrate/frame/safe-mode/src/mock.rs +++ b/substrate/frame/safe-mode/src/mock.rs @@ -138,6 +138,7 @@ impl pallet_proxy::Config for Test { type MaxPending = ConstU32<2>; type AnnouncementDepositBase = ConstU64<1>; type AnnouncementDepositFactor = ConstU64<1>; + type BlockNumberProvider = frame_system::Pallet; } /// The calls that can always bypass safe-mode. diff --git a/substrate/frame/scheduler/src/benchmarking.rs b/substrate/frame/scheduler/src/benchmarking.rs index d0a14fc73d64..ff40e8ef8abf 100644 --- a/substrate/frame/scheduler/src/benchmarking.rs +++ b/substrate/frame/scheduler/src/benchmarking.rs @@ -17,25 +17,23 @@ //! Scheduler pallet benchmarking. -use super::*; use alloc::vec; -use frame_benchmarking::v1::{account, benchmarks, BenchmarkError}; +use frame_benchmarking::v2::*; use frame_support::{ ensure, traits::{schedule::Priority, BoundedInline}, weights::WeightMeter, }; -use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; +use frame_system::{EventRecord, RawOrigin}; -use crate::Pallet as Scheduler; -use frame_system::{Call as SystemCall, EventRecord}; +use crate::*; -const SEED: u32 = 0; +type SystemCall = frame_system::Call; +type SystemOrigin = ::RuntimeOrigin; +const SEED: u32 = 0; const BLOCK_NUMBER: u32 = 2; -type SystemOrigin = ::RuntimeOrigin; - fn assert_last_event(generic_event: ::RuntimeEvent) { let events = frame_system::Pallet::::events(); let system_event: ::RuntimeEvent = generic_event.into(); @@ -61,7 +59,7 @@ fn fill_schedule( let call = make_call::(None); let period = Some(((i + 100).into(), 100)); let name = u32_to_name(i); - Scheduler::::do_schedule_named(name, t, period, 0, origin.clone(), call)?; + Pallet::::do_schedule_named(name, t, period, 0, origin.clone(), call)?; } ensure!(Agenda::::get(when).len() == n as usize, "didn't fill schedule"); Ok(()) @@ -134,107 +132,160 @@ fn make_origin(signed: bool) -> ::PalletsOrigin { } } -benchmarks! { +#[benchmarks] +mod benchmarks { + use super::*; + // `service_agendas` when no work is done. - service_agendas_base { - let now = BlockNumberFor::::from(BLOCK_NUMBER); + #[benchmark] + fn service_agendas_base() { + let now = BLOCK_NUMBER.into(); IncompleteSince::::put(now - One::one()); - }: { - Scheduler::::service_agendas(&mut WeightMeter::new(), now, 0); - } verify { + + #[block] + { + Pallet::::service_agendas(&mut WeightMeter::new(), now, 0); + } + assert_eq!(IncompleteSince::::get(), Some(now - One::one())); } // `service_agenda` when no work is done. 
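The scheduler hunks above and below follow the standard migration from the old `benchmarks! { ... }` macro to the `frame_benchmarking::v2` attribute macros. For orientation, a minimal hedged sketch of the v2 shape these hunks target; the pallet items `do_work` and `WorkCount` are hypothetical stand-ins, not part of this diff:

#[benchmarks]
mod benchmarks {
	// Assumes `use frame_benchmarking::v2::*;` and `frame_system::RawOrigin`
	// are in scope, as in the file being migrated.
	use super::*;

	#[benchmark]
	fn do_work(s: Linear<0, 100>) -> Result<(), BenchmarkError> {
		// Setup runs outside the measured section.
		let caller: T::AccountId = whitelisted_caller();

		// Only this call is measured; `#[block]` is the free-form alternative.
		#[extrinsic_call]
		_(RawOrigin::Signed(caller), s);

		// Post-conditions replace the old trailing `verify` block.
		ensure!(WorkCount::<T>::get() == s, "work not recorded");
		Ok(())
	}
}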
- service_agenda_base { + #[benchmark] + fn service_agenda_base( + s: Linear<0, { T::MaxScheduledPerBlock::get() }>, + ) -> Result<(), BenchmarkError> { let now = BLOCK_NUMBER.into(); - let s in 0 .. T::MaxScheduledPerBlock::get(); fill_schedule::(now, s)?; let mut executed = 0; - }: { - Scheduler::::service_agenda(&mut WeightMeter::new(), &mut executed, now, now, 0); - } verify { + + #[block] + { + Pallet::::service_agenda(&mut WeightMeter::new(), &mut executed, now, now, 0); + } + assert_eq!(executed, 0); + + Ok(()) } // `service_task` when the task is a non-periodic, non-named, non-fetched call which is not // dispatched (e.g. due to being overweight). - service_task_base { + #[benchmark] + fn service_task_base() { let now = BLOCK_NUMBER.into(); let task = make_task::(false, false, false, None, 0); // prevent any tasks from actually being executed as we only want the surrounding weight. let mut counter = WeightMeter::with_limit(Weight::zero()); - }: { - let result = Scheduler::::service_task(&mut counter, now, now, 0, true, task); - } verify { - //assert_eq!(result, Ok(())); + let _result; + + #[block] + { + _result = Pallet::::service_task(&mut counter, now, now, 0, true, task); + } + + // assert!(_result.is_ok()); } // `service_task` when the task is a non-periodic, non-named, fetched call (with a known // preimage length) and which is not dispatched (e.g. due to being overweight). - #[pov_mode = MaxEncodedLen { + #[benchmark(pov_mode = MaxEncodedLen { // Use measured PoV size for the Preimages since we pass in a length witness. Preimage::PreimageFor: Measured - }] - service_task_fetched { - let s in (BoundedInline::bound() as u32) .. (T::Preimages::MAX_LENGTH as u32); + })] + fn service_task_fetched( + s: Linear<{ BoundedInline::bound() as u32 }, { T::Preimages::MAX_LENGTH as u32 }>, + ) { let now = BLOCK_NUMBER.into(); let task = make_task::(false, false, false, Some(s), 0); // prevent any tasks from actually being executed as we only want the surrounding weight. let mut counter = WeightMeter::with_limit(Weight::zero()); - }: { - let result = Scheduler::::service_task(&mut counter, now, now, 0, true, task); - } verify { + let _result; + + #[block] + { + _result = Pallet::::service_task(&mut counter, now, now, 0, true, task); + } + + // assert!(result.is_ok()); } // `service_task` when the task is a non-periodic, named, non-fetched call which is not // dispatched (e.g. due to being overweight). - service_task_named { + #[benchmark] + fn service_task_named() { let now = BLOCK_NUMBER.into(); let task = make_task::(false, true, false, None, 0); // prevent any tasks from actually being executed as we only want the surrounding weight. let mut counter = WeightMeter::with_limit(Weight::zero()); - }: { - let result = Scheduler::::service_task(&mut counter, now, now, 0, true, task); - } verify { + let _result; + + #[block] + { + _result = Pallet::::service_task(&mut counter, now, now, 0, true, task); + } + + // assert!(result.is_ok()); } // `service_task` when the task is a periodic, non-named, non-fetched call which is not // dispatched (e.g. due to being overweight). - service_task_periodic { + #[benchmark] + fn service_task_periodic() { let now = BLOCK_NUMBER.into(); let task = make_task::(true, false, false, None, 0); // prevent any tasks from actually being executed as we only want the surrounding weight. 
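The "prevent any tasks from actually being executed" comment above refers to the zero-limit `WeightMeter` created just below: a meter with limit zero refuses every consumption attempt, so `service_task` never dispatches the call and only the surrounding bookkeeping is measured. A small hedged illustration of that behaviour:

use frame_support::weights::{Weight, WeightMeter};

fn zero_limit_meter_refuses_everything() {
	let mut zero = WeightMeter::with_limit(Weight::zero());
	// Even one unit of ref_time is rejected by a zero-limit meter...
	assert!(zero.try_consume(Weight::from_parts(1, 0)).is_err());
	// ...while an unlimited meter (as in `execute_dispatch` below) accepts it.
	assert!(WeightMeter::new().try_consume(Weight::from_parts(1, 0)).is_ok());
}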
let mut counter = WeightMeter::with_limit(Weight::zero()); - }: { - let result = Scheduler::::service_task(&mut counter, now, now, 0, true, task); - } verify { + let _result; + + #[block] + { + _result = Pallet::::service_task(&mut counter, now, now, 0, true, task); + } + + // assert!(result.is_ok()); } // `execute_dispatch` when the origin is `Signed`, not counting the dispatchable's weight. - execute_dispatch_signed { + #[benchmark] + fn execute_dispatch_signed() -> Result<(), BenchmarkError> { let mut counter = WeightMeter::new(); let origin = make_origin::(true); - let call = T::Preimages::realize(&make_call::(None)).unwrap().0; - }: { - assert!(Scheduler::::execute_dispatch(&mut counter, origin, call).is_ok()); - } - verify { + let call = T::Preimages::realize(&make_call::(None))?.0; + let result; + + #[block] + { + result = Pallet::::execute_dispatch(&mut counter, origin, call); + } + + assert!(result.is_ok()); + + Ok(()) } // `execute_dispatch` when the origin is not `Signed`, not counting the dispatchable's weight. - execute_dispatch_unsigned { + #[benchmark] + fn execute_dispatch_unsigned() -> Result<(), BenchmarkError> { let mut counter = WeightMeter::new(); let origin = make_origin::(false); - let call = T::Preimages::realize(&make_call::(None)).unwrap().0; - }: { - assert!(Scheduler::::execute_dispatch(&mut counter, origin, call).is_ok()); - } - verify { + let call = T::Preimages::realize(&make_call::(None))?.0; + let result; + + #[block] + { + result = Pallet::::execute_dispatch(&mut counter, origin, call); + } + + assert!(result.is_ok()); + + Ok(()) } - schedule { - let s in 0 .. (T::MaxScheduledPerBlock::get() - 1); + #[benchmark] + fn schedule( + s: Linear<0, { T::MaxScheduledPerBlock::get() - 1 }>, + ) -> Result<(), BenchmarkError> { let when = BLOCK_NUMBER.into(); let periodic = Some((BlockNumberFor::::one(), 100)); let priority = 0; @@ -242,24 +293,27 @@ benchmarks! { let call = Box::new(SystemCall::set_storage { items: vec![] }.into()); fill_schedule::(when, s)?; - }: _(RawOrigin::Root, when, periodic, priority, call) - verify { - ensure!( - Agenda::::get(when).len() == (s + 1) as usize, - "didn't add to schedule" - ); + + #[extrinsic_call] + _(RawOrigin::Root, when, periodic, priority, call); + + ensure!(Agenda::::get(when).len() == s as usize + 1, "didn't add to schedule"); + + Ok(()) } - cancel { - let s in 1 .. T::MaxScheduledPerBlock::get(); + #[benchmark] + fn cancel(s: Linear<1, { T::MaxScheduledPerBlock::get() }>) -> Result<(), BenchmarkError> { let when = BLOCK_NUMBER.into(); fill_schedule::(when, s)?; assert_eq!(Agenda::::get(when).len(), s as usize); let schedule_origin = T::ScheduleOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - }: _>(schedule_origin, when, 0) - verify { + + #[extrinsic_call] + _(schedule_origin as SystemOrigin, when, 0); + ensure!( s == 1 || Lookup::::get(u32_to_name(0)).is_none(), "didn't remove from lookup if more than 1 task scheduled for `when`" @@ -273,10 +327,14 @@ benchmarks! { s > 1 || Agenda::::get(when).len() == 0, "remove from schedule if only 1 task scheduled for `when`" ); + + Ok(()) } - schedule_named { - let s in 0 .. (T::MaxScheduledPerBlock::get() - 1); + #[benchmark] + fn schedule_named( + s: Linear<0, { T::MaxScheduledPerBlock::get() - 1 }>, + ) -> Result<(), BenchmarkError> { let id = u32_to_name(s); let when = BLOCK_NUMBER.into(); let periodic = Some((BlockNumberFor::::one(), 100)); @@ -285,21 +343,26 @@ benchmarks! 
{ let call = Box::new(SystemCall::set_storage { items: vec![] }.into()); fill_schedule::(when, s)?; - }: _(RawOrigin::Root, id, when, periodic, priority, call) - verify { - ensure!( - Agenda::::get(when).len() == (s + 1) as usize, - "didn't add to schedule" - ); + + #[extrinsic_call] + _(RawOrigin::Root, id, when, periodic, priority, call); + + ensure!(Agenda::::get(when).len() == s as usize + 1, "didn't add to schedule"); + + Ok(()) } - cancel_named { - let s in 1 .. T::MaxScheduledPerBlock::get(); + #[benchmark] + fn cancel_named( + s: Linear<1, { T::MaxScheduledPerBlock::get() }>, + ) -> Result<(), BenchmarkError> { let when = BLOCK_NUMBER.into(); fill_schedule::(when, s)?; - }: _(RawOrigin::Root, u32_to_name(0)) - verify { + + #[extrinsic_call] + _(RawOrigin::Root, u32_to_name(0)); + ensure!( s == 1 || Lookup::::get(u32_to_name(0)).is_none(), "didn't remove from lookup if more than 1 task scheduled for `when`" @@ -313,33 +376,49 @@ benchmarks! { s > 1 || Agenda::::get(when).len() == 0, "remove from schedule if only 1 task scheduled for `when`" ); + + Ok(()) } - schedule_retry { - let s in 1 .. T::MaxScheduledPerBlock::get(); + #[benchmark] + fn schedule_retry( + s: Linear<1, { T::MaxScheduledPerBlock::get() }>, + ) -> Result<(), BenchmarkError> { let when = BLOCK_NUMBER.into(); fill_schedule::(when, s)?; let name = u32_to_name(s - 1); let address = Lookup::::get(name).unwrap(); - let period: BlockNumberFor = 1u32.into(); - let root: ::PalletsOrigin = frame_system::RawOrigin::Root.into(); + let period: BlockNumberFor = 1_u32.into(); let retry_config = RetryConfig { total_retries: 10, remaining: 10, period }; Retries::::insert(address, retry_config); let (mut when, index) = address; let task = Agenda::::get(when)[index as usize].clone().unwrap(); let mut weight_counter = WeightMeter::with_limit(T::MaximumWeight::get()); - }: { - Scheduler::::schedule_retry(&mut weight_counter, when, when, index, &task, retry_config); - } verify { + + #[block] + { + Pallet::::schedule_retry( + &mut weight_counter, + when, + when, + index, + &task, + retry_config, + ); + } + when = when + BlockNumberFor::::one(); assert_eq!( Retries::::get((when, 0)), Some(RetryConfig { total_retries: 10, remaining: 9, period }) ); + + Ok(()) } - set_retry { + #[benchmark] + fn set_retry() -> Result<(), BenchmarkError> { let s = T::MaxScheduledPerBlock::get(); let when = BLOCK_NUMBER.into(); @@ -348,8 +427,10 @@ benchmarks! { let address = Lookup::::get(name).unwrap(); let (when, index) = address; let period = BlockNumberFor::::one(); - }: _(RawOrigin::Root, (when, index), 10, period) - verify { + + #[extrinsic_call] + _(RawOrigin::Root, (when, index), 10, period); + assert_eq!( Retries::::get((when, index)), Some(RetryConfig { total_retries: 10, remaining: 10, period }) @@ -357,9 +438,12 @@ benchmarks! { assert_last_event::( Event::RetrySet { task: address, id: None, period, retries: 10 }.into(), ); + + Ok(()) } - set_retry_named { + #[benchmark] + fn set_retry_named() -> Result<(), BenchmarkError> { let s = T::MaxScheduledPerBlock::get(); let when = BLOCK_NUMBER.into(); @@ -368,8 +452,10 @@ benchmarks! { let address = Lookup::::get(name).unwrap(); let (when, index) = address; let period = BlockNumberFor::::one(); - }: _(RawOrigin::Root, name, 10, period) - verify { + + #[extrinsic_call] + _(RawOrigin::Root, name, 10, period); + assert_eq!( Retries::::get((when, index)), Some(RetryConfig { total_retries: 10, remaining: 10, period }) @@ -377,9 +463,12 @@ benchmarks! 
{ assert_last_event::( Event::RetrySet { task: address, id: Some(name), period, retries: 10 }.into(), ); + + Ok(()) } - cancel_retry { + #[benchmark] + fn cancel_retry() -> Result<(), BenchmarkError> { let s = T::MaxScheduledPerBlock::get(); let when = BLOCK_NUMBER.into(); @@ -388,16 +477,19 @@ benchmarks! { let address = Lookup::::get(name).unwrap(); let (when, index) = address; let period = BlockNumberFor::::one(); - assert!(Scheduler::::set_retry(RawOrigin::Root.into(), (when, index), 10, period).is_ok()); - }: _(RawOrigin::Root, (when, index)) - verify { + assert!(Pallet::::set_retry(RawOrigin::Root.into(), (when, index), 10, period).is_ok()); + + #[extrinsic_call] + _(RawOrigin::Root, (when, index)); + assert!(!Retries::::contains_key((when, index))); - assert_last_event::( - Event::RetryCancelled { task: address, id: None }.into(), - ); + assert_last_event::(Event::RetryCancelled { task: address, id: None }.into()); + + Ok(()) } - cancel_retry_named { + #[benchmark] + fn cancel_retry_named() -> Result<(), BenchmarkError> { let s = T::MaxScheduledPerBlock::get(); let when = BLOCK_NUMBER.into(); @@ -406,14 +498,20 @@ benchmarks! { let address = Lookup::::get(name).unwrap(); let (when, index) = address; let period = BlockNumberFor::::one(); - assert!(Scheduler::::set_retry_named(RawOrigin::Root.into(), name, 10, period).is_ok()); - }: _(RawOrigin::Root, name) - verify { + assert!(Pallet::::set_retry_named(RawOrigin::Root.into(), name, 10, period).is_ok()); + + #[extrinsic_call] + _(RawOrigin::Root, name); + assert!(!Retries::::contains_key((when, index))); - assert_last_event::( - Event::RetryCancelled { task: address, id: Some(name) }.into(), - ); + assert_last_event::(Event::RetryCancelled { task: address, id: Some(name) }.into()); + + Ok(()) } - impl_benchmark_test_suite!(Scheduler, crate::mock::new_test_ext(), crate::mock::Test); + impl_benchmark_test_suite! { + Pallet, + mock::new_test_ext(), + mock::Test + } } diff --git a/substrate/frame/session/benchmarking/src/inner.rs b/substrate/frame/session/benchmarking/src/inner.rs index 9ba47b34ed7a..9789b6bb593d 100644 --- a/substrate/frame/session/benchmarking/src/inner.rs +++ b/substrate/frame/session/benchmarking/src/inner.rs @@ -22,7 +22,7 @@ use alloc::{vec, vec::Vec}; use sp_runtime::traits::{One, StaticLookup, TrailingZeroInput}; use codec::Decode; -use frame_benchmarking::v1::benchmarks; +use frame_benchmarking::v2::*; use frame_support::traits::{Get, KeyOwnerProofSystem, OnInitialize}; use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; use pallet_session::{historical::Pallet as Historical, Pallet as Session, *}; @@ -45,8 +45,12 @@ impl OnInitialize> for Pallet { } } -benchmarks! { - set_keys { +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn set_keys() -> Result<(), BenchmarkError> { let n = MaxNominationsOf::::get(); let (v_stash, _) = create_validator_with_nominators::( n, @@ -58,13 +62,19 @@ benchmarks! { let v_controller = pallet_staking::Pallet::::bonded(&v_stash).ok_or("not stash")?; let keys = T::Keys::decode(&mut TrailingZeroInput::zeroes()).unwrap(); - let proof: Vec = vec![0,1,2,3]; + let proof: Vec = vec![0, 1, 2, 3]; // Whitelist controller account from further DB operations. 
let v_controller_key = frame_system::Account::::hashed_key_for(&v_controller); frame_benchmarking::benchmarking::add_to_whitelist(v_controller_key.into()); - }: _(RawOrigin::Signed(v_controller), keys, proof) - purge_keys { + #[extrinsic_call] + _(RawOrigin::Signed(v_controller), keys, proof); + + Ok(()) + } + + #[benchmark] + fn purge_keys() -> Result<(), BenchmarkError> { let n = MaxNominationsOf::::get(); let (v_stash, _) = create_validator_with_nominators::( n, @@ -75,30 +85,33 @@ benchmarks! { )?; let v_controller = pallet_staking::Pallet::::bonded(&v_stash).ok_or("not stash")?; let keys = T::Keys::decode(&mut TrailingZeroInput::zeroes()).unwrap(); - let proof: Vec = vec![0,1,2,3]; + let proof: Vec = vec![0, 1, 2, 3]; Session::::set_keys(RawOrigin::Signed(v_controller.clone()).into(), keys, proof)?; // Whitelist controller account from further DB operations. let v_controller_key = frame_system::Account::::hashed_key_for(&v_controller); frame_benchmarking::benchmarking::add_to_whitelist(v_controller_key.into()); - }: _(RawOrigin::Signed(v_controller)) - #[extra] - check_membership_proof_current_session { - let n in 2 .. MAX_VALIDATORS as u32; + #[extrinsic_call] + _(RawOrigin::Signed(v_controller)); + Ok(()) + } + + #[benchmark(extra)] + fn check_membership_proof_current_session(n: Linear<2, MAX_VALIDATORS>) { let (key, key_owner_proof1) = check_membership_proof_setup::(n); let key_owner_proof2 = key_owner_proof1.clone(); - }: { - Historical::::check_proof(key, key_owner_proof1); - } - verify { + + #[block] + { + Historical::::check_proof(key, key_owner_proof1); + } + assert!(Historical::::check_proof(key, key_owner_proof2).is_some()); } - #[extra] - check_membership_proof_historical_session { - let n in 2 .. MAX_VALIDATORS as u32; - + #[benchmark(extra)] + fn check_membership_proof_historical_session(n: Linear<2, MAX_VALIDATORS>) { let (key, key_owner_proof1) = check_membership_proof_setup::(n); // skip to the next session so that the session is historical @@ -106,14 +119,21 @@ benchmarks! { Session::::rotate_session(); let key_owner_proof2 = key_owner_proof1.clone(); - }: { - Historical::::check_proof(key, key_owner_proof1); - } - verify { + + #[block] + { + Historical::::check_proof(key, key_owner_proof1); + } + assert!(Historical::::check_proof(key, key_owner_proof2).is_some()); } - impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test, extra = false); + impl_benchmark_test_suite!( + Pallet, + crate::mock::new_test_ext(), + crate::mock::Test, + extra = false + ); } /// Sets up the benchmark for checking a membership proof. It creates the given diff --git a/substrate/frame/session/benchmarking/src/mock.rs b/substrate/frame/session/benchmarking/src/mock.rs index 2aec58cceded..346cd04c0fa9 100644 --- a/substrate/frame/session/benchmarking/src/mock.rs +++ b/substrate/frame/session/benchmarking/src/mock.rs @@ -27,7 +27,7 @@ use frame_support::{ derive_impl, parameter_types, traits::{ConstU32, ConstU64}, }; -use sp_runtime::{traits::IdentityLookup, BuildStorage}; +use sp_runtime::{traits::IdentityLookup, BuildStorage, KeyTypeId}; type AccountId = u64; type Nonce = u32; @@ -42,6 +42,7 @@ frame_support::construct_runtime!( Balances: pallet_balances, Staking: pallet_staking, Session: pallet_session, + Historical: pallet_session::historical } ); @@ -79,7 +80,8 @@ sp_runtime::impl_opaque_keys! 
{ pub struct TestSessionHandler; impl pallet_session::SessionHandler for TestSessionHandler { - const KEY_TYPE_IDS: &'static [sp_runtime::KeyTypeId] = &[]; + // corresponds to the opaque key id above + const KEY_TYPE_IDS: &'static [KeyTypeId] = &[KeyTypeId([100u8, 117u8, 109u8, 121u8])]; fn on_genesis_session(_validators: &[(AccountId, Ks)]) {} diff --git a/substrate/frame/src/lib.rs b/substrate/frame/src/lib.rs index 0ca36ca8545a..8031ddf96e6a 100644 --- a/substrate/frame/src/lib.rs +++ b/substrate/frame/src/lib.rs @@ -219,8 +219,8 @@ pub mod prelude { /// Runtime traits #[doc(no_inline)] pub use sp_runtime::traits::{ - Bounded, DispatchInfoOf, Dispatchable, SaturatedConversion, Saturating, StaticLookup, - TrailingZeroInput, + BlockNumberProvider, Bounded, DispatchInfoOf, Dispatchable, SaturatedConversion, + Saturating, StaticLookup, TrailingZeroInput, }; /// Other error/result types for runtime @@ -391,7 +391,7 @@ pub mod runtime { LOCAL_TESTNET_RUNTIME_PRESET, }; pub use sp_inherents::{CheckInherentsResult, InherentData}; - pub use sp_keyring::AccountKeyring; + pub use sp_keyring::Sr25519Keyring; pub use sp_runtime::{ApplyExtrinsicResult, ExtrinsicInclusionMode}; } diff --git a/substrate/frame/state-trie-migration/src/lib.rs b/substrate/frame/state-trie-migration/src/lib.rs index 3fe5abb81031..61323b70b33d 100644 --- a/substrate/frame/state-trie-migration/src/lib.rs +++ b/substrate/frame/state-trie-migration/src/lib.rs @@ -249,13 +249,13 @@ pub mod pallet { if limits.item.is_zero() || limits.size.is_zero() { // handle this minor edge case, else we would call `migrate_tick` at least once. log!(warn, "limits are zero. stopping"); - return Ok(()) + return Ok(()); } while !self.exhausted(limits) && !self.finished() { if let Err(e) = self.migrate_tick() { log!(error, "migrate_until_exhaustion failed: {:?}", e); - return Err(e) + return Err(e); } } @@ -332,7 +332,7 @@ pub mod pallet { _ => { // defensive: there must be an ongoing top migration. frame_support::defensive!("cannot migrate child key."); - return Ok(()) + return Ok(()); }, }; @@ -374,7 +374,7 @@ pub mod pallet { Progress::Complete => { // defensive: there must be an ongoing top migration. frame_support::defensive!("cannot migrate top key."); - return Ok(()) + return Ok(()); }, }; @@ -669,7 +669,7 @@ pub mod pallet { // ensure that the migration witness data was correct. if real_size_upper < task.dyn_size { Self::slash(who, deposit)?; - return Ok(().into()) + return Ok(().into()); } Self::deposit_event(Event::::Migrated { @@ -957,6 +957,7 @@ pub mod pallet { mod benchmarks { use super::{pallet::Pallet as StateTrieMigration, *}; use alloc::vec; + use frame_benchmarking::v2::*; use frame_support::traits::fungible::{Inspect, Mutate}; // The size of the key seemingly makes no difference in the read/write time, so we make it @@ -970,8 +971,12 @@ mod benchmarks { stash } - frame_benchmarking::benchmarks! { - continue_migrate { + #[benchmarks] + mod inner_benchmarks { + use super::*; + + #[benchmark] + fn continue_migrate() -> Result<(), BenchmarkError> { // note that this benchmark should migrate nothing, as we only want the overhead weight // of the bookkeeping, and the migration cost itself is noted via the `dynamic_weight` // function. @@ -980,116 +985,151 @@ mod benchmarks { let stash = set_balance_for_deposit::(&caller, null.item); // Allow signed migrations. 
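Looking back at the session mock above: the four bytes in the new `KEY_TYPE_IDS` entry are simply the ASCII spelling of `dumy`, the dummy key type used by the opaque test keys. A hedged equivalence check:

use sp_runtime::KeyTypeId;

fn dummy_key_type_id_is_ascii_dumy() {
	// 100, 117, 109, 121 are the ASCII codes for 'd', 'u', 'm', 'y'.
	assert_eq!(KeyTypeId([100u8, 117u8, 109u8, 121u8]), KeyTypeId(*b"dumy"));
}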
SignedMigrationMaxLimits::::put(MigrationLimits { size: 1024, item: 5 }); - }: _(frame_system::RawOrigin::Signed(caller.clone()), null, 0, StateTrieMigration::::migration_process()) - verify { + + #[extrinsic_call] + _( + frame_system::RawOrigin::Signed(caller.clone()), + null, + 0, + StateTrieMigration::::migration_process(), + ); + assert_eq!(StateTrieMigration::::migration_process(), Default::default()); - assert_eq!(T::Currency::balance(&caller), stash) + assert_eq!(T::Currency::balance(&caller), stash); + + Ok(()) } - continue_migrate_wrong_witness { + #[benchmark] + fn continue_migrate_wrong_witness() -> Result<(), BenchmarkError> { let null = MigrationLimits::default(); let caller = frame_benchmarking::whitelisted_caller(); - let bad_witness = MigrationTask { progress_top: Progress::LastKey(vec![1u8].try_into().unwrap()), ..Default::default() }; - }: { - assert!( - StateTrieMigration::::continue_migrate( + let bad_witness = MigrationTask { + progress_top: Progress::LastKey(vec![1u8].try_into().unwrap()), + ..Default::default() + }; + #[block] + { + assert!(StateTrieMigration::::continue_migrate( frame_system::RawOrigin::Signed(caller).into(), null, 0, bad_witness, ) - .is_err() - ) - } - verify { - assert_eq!(StateTrieMigration::::migration_process(), Default::default()) + .is_err()); + } + + assert_eq!(StateTrieMigration::::migration_process(), Default::default()); + + Ok(()) } - migrate_custom_top_success { + #[benchmark] + fn migrate_custom_top_success() -> Result<(), BenchmarkError> { let null = MigrationLimits::default(); let caller: T::AccountId = frame_benchmarking::whitelisted_caller(); let stash = set_balance_for_deposit::(&caller, null.item); - }: migrate_custom_top(frame_system::RawOrigin::Signed(caller.clone()), Default::default(), 0) - verify { + #[extrinsic_call] + migrate_custom_top( + frame_system::RawOrigin::Signed(caller.clone()), + Default::default(), + 0, + ); + assert_eq!(StateTrieMigration::::migration_process(), Default::default()); - assert_eq!(T::Currency::balance(&caller), stash) + assert_eq!(T::Currency::balance(&caller), stash); + Ok(()) } - migrate_custom_top_fail { + #[benchmark] + fn migrate_custom_top_fail() -> Result<(), BenchmarkError> { let null = MigrationLimits::default(); let caller: T::AccountId = frame_benchmarking::whitelisted_caller(); let stash = set_balance_for_deposit::(&caller, null.item); // for tests, we need to make sure there is _something_ in storage that is being // migrated. 
- sp_io::storage::set(b"foo", vec![1u8;33].as_ref()); - }: { - assert!( - StateTrieMigration::::migrate_custom_top( + sp_io::storage::set(b"foo", vec![1u8; 33].as_ref()); + #[block] + { + assert!(StateTrieMigration::::migrate_custom_top( frame_system::RawOrigin::Signed(caller.clone()).into(), vec![b"foo".to_vec()], 1, - ).is_ok() - ); + ) + .is_ok()); + + frame_system::Pallet::::assert_last_event( + ::RuntimeEvent::from(crate::Event::Slashed { + who: caller.clone(), + amount: StateTrieMigration::::calculate_deposit_for(1u32), + }) + .into(), + ); + } - frame_system::Pallet::::assert_last_event( - ::RuntimeEvent::from(crate::Event::Slashed { - who: caller.clone(), - amount: StateTrieMigration::::calculate_deposit_for(1u32), - }).into(), - ); - } - verify { assert_eq!(StateTrieMigration::::migration_process(), Default::default()); // must have gotten slashed - assert!(T::Currency::balance(&caller) < stash) + assert!(T::Currency::balance(&caller) < stash); + + Ok(()) } - migrate_custom_child_success { + #[benchmark] + fn migrate_custom_child_success() -> Result<(), BenchmarkError> { let caller: T::AccountId = frame_benchmarking::whitelisted_caller(); let stash = set_balance_for_deposit::(&caller, 0); - }: migrate_custom_child( - frame_system::RawOrigin::Signed(caller.clone()), - StateTrieMigration::::childify(Default::default()), - Default::default(), - 0 - ) - verify { + + #[extrinsic_call] + migrate_custom_child( + frame_system::RawOrigin::Signed(caller.clone()), + StateTrieMigration::::childify(Default::default()), + Default::default(), + 0, + ); + assert_eq!(StateTrieMigration::::migration_process(), Default::default()); assert_eq!(T::Currency::balance(&caller), stash); + + Ok(()) } - migrate_custom_child_fail { + #[benchmark] + fn migrate_custom_child_fail() -> Result<(), BenchmarkError> { let caller: T::AccountId = frame_benchmarking::whitelisted_caller(); let stash = set_balance_for_deposit::(&caller, 1); // for tests, we need to make sure there is _something_ in storage that is being // migrated. - sp_io::default_child_storage::set(b"top", b"foo", vec![1u8;33].as_ref()); - }: { - assert!( - StateTrieMigration::::migrate_custom_child( + sp_io::default_child_storage::set(b"top", b"foo", vec![1u8; 33].as_ref()); + + #[block] + { + assert!(StateTrieMigration::::migrate_custom_child( frame_system::RawOrigin::Signed(caller.clone()).into(), StateTrieMigration::::childify("top"), vec![b"foo".to_vec()], 1, - ).is_ok() - ) - } - verify { + ) + .is_ok()); + } assert_eq!(StateTrieMigration::::migration_process(), Default::default()); // must have gotten slashed - assert!(T::Currency::balance(&caller) < stash) + assert!(T::Currency::balance(&caller) < stash); + Ok(()) } - process_top_key { - let v in 1 .. 
(4 * 1024 * 1024); - + #[benchmark] + fn process_top_key(v: Linear<1, { 4 * 1024 * 1024 }>) -> Result<(), BenchmarkError> { let value = alloc::vec![1u8; v as usize]; sp_io::storage::set(KEY, &value); - }: { - let data = sp_io::storage::get(KEY).unwrap(); - sp_io::storage::set(KEY, &data); - let _next = sp_io::storage::next_key(KEY); - assert_eq!(data, value); + #[block] + { + let data = sp_io::storage::get(KEY).unwrap(); + sp_io::storage::set(KEY, &data); + let _next = sp_io::storage::next_key(KEY); + assert_eq!(data, value); + } + + Ok(()) } impl_benchmark_test_suite!( @@ -1741,7 +1781,7 @@ pub(crate) mod remote_tests { let ((finished, weight), proof) = ext.execute_and_prove(|| { let weight = run_to_block::(now + One::one()).1; if StateTrieMigration::::migration_process().finished() { - return (true, weight) + return (true, weight); } duration += One::one(); now += One::one(); @@ -1768,7 +1808,7 @@ pub(crate) mod remote_tests { ext.commit_all().unwrap(); if finished { - break + break; } } diff --git a/substrate/frame/support/Cargo.toml b/substrate/frame/support/Cargo.toml index d7da034b3492..4348bd275605 100644 --- a/substrate/frame/support/Cargo.toml +++ b/substrate/frame/support/Cargo.toml @@ -28,15 +28,14 @@ scale-info = { features = [ ], workspace = true } frame-metadata = { features = [ "current", + "unstable", ], workspace = true } sp-api = { features = [ "frame-metadata", ], workspace = true } sp-std = { workspace = true } sp-io = { workspace = true } -sp-runtime = { features = [ - "serde", -], workspace = true } +sp-runtime = { features = ["serde"], workspace = true } sp-tracing = { workspace = true } sp-core = { workspace = true } sp-arithmetic = { workspace = true } diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs index c12fc20bc8b8..0b3bd5168865 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs @@ -113,11 +113,13 @@ pub fn expand_runtime_metadata( <#extrinsic as #scrate::traits::SignedTransactionBuilder>::Extension >(); + use #scrate::__private::metadata_ir::InternalImplRuntimeApis; + #scrate::__private::metadata_ir::MetadataIR { pallets: #scrate::__private::vec![ #(#pallets),* ], extrinsic: #scrate::__private::metadata_ir::ExtrinsicMetadataIR { ty, - version: <#extrinsic as #scrate::sp_runtime::traits::ExtrinsicMetadata>::VERSION, + versions: <#extrinsic as #scrate::sp_runtime::traits::ExtrinsicMetadata>::VERSIONS.into_iter().map(|ref_version| *ref_version).collect(), address_ty, call_ty, signature_ty, diff --git a/substrate/frame/support/procedural/src/construct_runtime/mod.rs b/substrate/frame/support/procedural/src/construct_runtime/mod.rs index 17042c248780..087faf37252d 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/mod.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/mod.rs @@ -466,7 +466,6 @@ fn construct_runtime_final_expansion( // Therefore, the `Deref` trait will resolve the `runtime_metadata` from `impl_runtime_apis!` // when both macros are called; and will resolve an empty `runtime_metadata` when only the `construct_runtime!` // is called. 
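The metadata expansion above switches from a single `VERSION` constant to collecting every entry of `ExtrinsicMetadata::VERSIONS`. A hedged sketch of that collection step in isolation; `supported_extrinsic_versions` is an illustrative helper, not part of this diff:

use sp_runtime::traits::ExtrinsicMetadata;

fn supported_extrinsic_versions<E: ExtrinsicMetadata>() -> Vec<u8> {
	// `VERSIONS` is a `&'static [u8]`; copying it out mirrors the
	// `.into_iter().map(|ref_version| *ref_version).collect()` in the macro.
	E::VERSIONS.iter().copied().collect()
}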
- #[doc(hidden)] trait InternalConstructRuntime { #[inline(always)] @@ -477,6 +476,8 @@ fn construct_runtime_final_expansion( #[doc(hidden)] impl InternalConstructRuntime for &#name {} + use #scrate::__private::metadata_ir::InternalImplRuntimeApis; + #outer_event #outer_error diff --git a/substrate/frame/support/procedural/src/runtime/parse/pallet.rs b/substrate/frame/support/procedural/src/runtime/parse/pallet.rs index 52f57cd2cd8b..1397b7266a18 100644 --- a/substrate/frame/support/procedural/src/runtime/parse/pallet.rs +++ b/substrate/frame/support/procedural/src/runtime/parse/pallet.rs @@ -21,7 +21,7 @@ use crate::{ }; use frame_support_procedural_tools::get_doc_literals; use quote::ToTokens; -use syn::{punctuated::Punctuated, token, Error}; +use syn::{punctuated::Punctuated, spanned::Spanned, token, Error}; impl Pallet { pub fn try_from( @@ -78,7 +78,18 @@ impl Pallet { }) .collect(); - let cfg_pattern = vec![]; + let cfg_pattern = item + .attrs + .iter() + .filter(|attr| attr.path().segments.first().map_or(false, |s| s.ident == "cfg")) + .map(|attr| { + attr.parse_args_with(|input: syn::parse::ParseStream| { + let input = input.parse::()?; + cfg_expr::Expression::parse(&input.to_string()) + .map_err(|e| syn::Error::new(attr.span(), e.to_string())) + }) + }) + .collect::>>()?; let docs = get_doc_literals(&item.attrs); diff --git a/substrate/frame/support/test/Cargo.toml b/substrate/frame/support/test/Cargo.toml index 2187ee22b395..17ee3130b741 100644 --- a/substrate/frame/support/test/Cargo.toml +++ b/substrate/frame/support/test/Cargo.toml @@ -19,7 +19,7 @@ static_assertions = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true } codec = { features = ["derive"], workspace = true } scale-info = { features = ["derive"], workspace = true } -frame-metadata = { features = ["current"], workspace = true } +frame-metadata = { features = ["current", "unstable"], workspace = true } sp-api = { workspace = true } sp-arithmetic = { workspace = true } sp-io = { workspace = true } diff --git a/substrate/frame/support/test/tests/pallet.rs b/substrate/frame/support/test/tests/pallet.rs index b0b83f772499..9df1f461bba2 100644 --- a/substrate/frame/support/test/tests/pallet.rs +++ b/substrate/frame/support/test/tests/pallet.rs @@ -53,6 +53,9 @@ parameter_types! { /// Latest stable metadata version used for testing. const LATEST_METADATA_VERSION: u32 = 15; +/// Unstable metadata version. +const UNSTABLE_METADATA_VERSION: u32 = u32::MAX; + pub struct SomeType1; impl From for u64 { fn from(_t: SomeType1) -> Self { @@ -799,20 +802,43 @@ where } } -frame_support::construct_runtime!( - pub struct Runtime { - // Exclude part `Storage` in order not to check its metadata in tests. 
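The `cfg_pattern` change above stops discarding `#[cfg(...)]` attributes on pallet declarations and instead parses each one with the `cfg-expr` crate. A standalone hedged sketch of that parse-and-evaluate round trip, checked against a hypothetical set of enabled features:

fn cfg_attribute_round_trip() {
	let expr = cfg_expr::Expression::parse(r#"feature = "frame-feature-testing""#)
		.expect("valid cfg expression");
	let enabled = expr.eval(|pred| match pred {
		cfg_expr::Predicate::Feature(name) => *name == "frame-feature-testing",
		_ => false,
	});
	assert!(enabled);
}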
- System: frame_system exclude_parts { Pallet, Storage }, - Example: pallet, - Example2: pallet2 exclude_parts { Call }, - #[cfg(feature = "frame-feature-testing")] - Example3: pallet3, - Example4: pallet4 use_parts { Call }, +#[frame_support::runtime] +mod runtime { + #[runtime::runtime] + #[runtime::derive( + RuntimeCall, + RuntimeEvent, + RuntimeError, + RuntimeOrigin, + RuntimeFreezeReason, + RuntimeHoldReason, + RuntimeSlashReason, + RuntimeLockId, + RuntimeTask + )] + pub struct Runtime; - #[cfg(feature = "frame-feature-testing-2")] - Example5: pallet5, - } -); + #[runtime::pallet_index(0)] + pub type System = frame_system + Call + Event; + + #[runtime::pallet_index(1)] + pub type Example = pallet; + + #[runtime::pallet_index(2)] + #[runtime::disable_call] + pub type Example2 = pallet2; + + #[cfg(feature = "frame-feature-testing")] + #[runtime::pallet_index(3)] + pub type Example3 = pallet3; + + #[runtime::pallet_index(4)] + pub type Example4 = pallet4; + + #[cfg(feature = "frame-feature-testing-2")] + #[runtime::pallet_index(5)] + pub type Example5 = pallet5; +} // Test that the part `RuntimeCall` is excluded from Example2 and included in Example4. fn _ensure_call_is_correctly_excluded_and_included(call: RuntimeCall) { @@ -1847,6 +1873,16 @@ fn metadata() { error: None, docs: vec![" Test that the supertrait check works when we pass some parameter to the `frame_system::Config`."], }, + PalletMetadata { + index: 4, + name: "Example4", + storage: None, + calls: Some(meta_type::>().into()), + event: None, + constants: vec![], + error: None, + docs: vec![], + }, #[cfg(feature = "frame-feature-testing-2")] PalletMetadata { index: 5, @@ -1944,7 +1980,10 @@ fn metadata_at_version() { #[test] fn metadata_versions() { - assert_eq!(vec![14, LATEST_METADATA_VERSION], Runtime::metadata_versions()); + assert_eq!( + vec![14, LATEST_METADATA_VERSION, UNSTABLE_METADATA_VERSION], + Runtime::metadata_versions() + ); } #[test] diff --git a/substrate/frame/support/test/tests/runtime_metadata.rs b/substrate/frame/support/test/tests/runtime_metadata.rs index 7523a415d458..a098643abb91 100644 --- a/substrate/frame/support/test/tests/runtime_metadata.rs +++ b/substrate/frame/support/test/tests/runtime_metadata.rs @@ -80,34 +80,39 @@ sp_api::decl_runtime_apis! { } } -sp_api::impl_runtime_apis! { - impl self::Api for Runtime { - fn test(_data: u64) { - unimplemented!() - } +// Module to emulate having the implementation in a different file. +mod apis { + use super::{Block, BlockT, Runtime}; - fn something_with_block(_: Block) -> Block { - unimplemented!() - } + sp_api::impl_runtime_apis! 
{ + impl crate::Api for Runtime { + fn test(_data: u64) { + unimplemented!() + } - fn function_with_two_args(_: u64, _: Block) { - unimplemented!() - } + fn something_with_block(_: Block) -> Block { + unimplemented!() + } - fn same_name() {} + fn function_with_two_args(_: u64, _: Block) { + unimplemented!() + } - fn wild_card(_: u32) {} - } + fn same_name() {} - impl sp_api::Core for Runtime { - fn version() -> sp_version::RuntimeVersion { - unimplemented!() - } - fn execute_block(_: Block) { - unimplemented!() + fn wild_card(_: u32) {} } - fn initialize_block(_: &::Header) -> sp_runtime::ExtrinsicInclusionMode { - unimplemented!() + + impl sp_api::Core for Runtime { + fn version() -> sp_version::RuntimeVersion { + unimplemented!() + } + fn execute_block(_: Block) { + unimplemented!() + } + fn initialize_block(_: &::Header) -> sp_runtime::ExtrinsicInclusionMode { + unimplemented!() + } } } } diff --git a/substrate/frame/transaction-payment/src/payment.rs index 4b39cd3fe53b..b8a047fee3e6 100644 --- a/substrate/frame/transaction-payment/src/payment.rs +++ b/substrate/frame/transaction-payment/src/payment.rs @@ -155,14 +155,15 @@ where if let Some(paid) = already_withdrawn { // Calculate how much refund we should return let refund_amount = paid.peek().saturating_sub(corrected_fee); - // refund to the the account that paid the fees if it exists. otherwise, don't refind - // anything. - let refund_imbalance = if F::total_balance(who) > F::Balance::zero() { - F::deposit(who, refund_amount, Precision::BestEffort) - .unwrap_or_else(|_| Debt::::zero()) - } else { - Debt::::zero() - }; + // Refund to the account that paid the fees if it exists & refund is non-zero. + // Otherwise, don't refund anything. + let refund_imbalance = + if refund_amount > Zero::zero() && F::total_balance(who) > F::Balance::zero() { + F::deposit(who, refund_amount, Precision::BestEffort) + .unwrap_or_else(|_| Debt::::zero()) + } else { + Debt::::zero() + }; // merge the imbalance caused by paying the fees and refunding parts of it again. let adjusted_paid: Credit = paid .offset(refund_imbalance) diff --git a/substrate/frame/transaction-payment/src/tests.rs index 572c1d4961dd..bde1bf64728e 100644 --- a/substrate/frame/transaction-payment/src/tests.rs +++ b/substrate/frame/transaction-payment/src/tests.rs @@ -877,3 +877,40 @@ fn no_fee_and_no_weight_for_other_origins() { assert_eq!(post_info.actual_weight, Some(info.call_weight)); }) } + +#[test] +fn fungible_adapter_no_zero_refund_action() { + type FungibleAdapterT = payment::FungibleAdapter; + + ExtBuilder::default().balance_factor(10).build().execute_with(|| { + System::set_block_number(10); + + let dummy_acc = 1; + let (actual_fee, no_tip) = (10, 0); + let already_paid = >::withdraw_fee( + &dummy_acc, + CALL, + &CALL.get_dispatch_info(), + actual_fee, + no_tip, + ).expect("Account must have enough funds."); + + // Correction action with no expected side effect. + assert!(>::correct_and_deposit_fee( + &dummy_acc, + &CALL.get_dispatch_info(), + &default_post_info(), + actual_fee, + no_tip, + already_paid, + ).is_ok()); + + // Ensure no zero amount deposit event is emitted. + let events = System::events(); + assert!(!events + .iter() + .any(|record| matches!(record.event, RuntimeEvent::Balances(pallet_balances::Event::Deposit { amount, ..
}) if amount.is_zero())), + "No zero amount deposit amount event should be emitted.", + ); + }); +} diff --git a/substrate/frame/treasury/src/benchmarking.rs b/substrate/frame/treasury/src/benchmarking.rs index a03ee149db9b..a11723a27b2c 100644 --- a/substrate/frame/treasury/src/benchmarking.rs +++ b/substrate/frame/treasury/src/benchmarking.rs @@ -198,7 +198,7 @@ mod benchmarks { None, ); - let valid_from = frame_system::Pallet::::block_number(); + let valid_from = T::BlockNumberProvider::current_block_number(); let expire_at = valid_from.saturating_add(T::PayoutPeriod::get()); assert_last_event::( Event::AssetSpendApproved { diff --git a/substrate/frame/treasury/src/lib.rs b/substrate/frame/treasury/src/lib.rs index faacda1c0783..281012ffb4c9 100644 --- a/substrate/frame/treasury/src/lib.rs +++ b/substrate/frame/treasury/src/lib.rs @@ -106,7 +106,7 @@ use frame_support::{ weights::Weight, BoundedVec, PalletId, }; -use frame_system::pallet_prelude::BlockNumberFor; +use frame_system::pallet_prelude::BlockNumberFor as SystemBlockNumberFor; pub use pallet::*; pub use weights::WeightInfo; @@ -122,6 +122,8 @@ pub type NegativeImbalanceOf = <>::Currency as Currenc >>::NegativeImbalance; type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; type BeneficiaryLookupOf = <>::BeneficiaryLookup as StaticLookup>::Source; +pub type BlockNumberFor = + <>::BlockNumberProvider as BlockNumberProvider>::BlockNumber; /// A trait to allow the Treasury Pallet to spend it's funds for other purposes. /// There is an expectation that the implementer of this trait will correctly manage @@ -202,7 +204,7 @@ pub mod pallet { pallet_prelude::*, traits::tokens::{ConversionFromAssetBalance, PaymentStatus}, }; - use frame_system::pallet_prelude::*; + use frame_system::pallet_prelude::{ensure_signed, OriginFor}; #[pallet::pallet] pub struct Pallet(PhantomData<(T, I)>); @@ -221,7 +223,7 @@ pub mod pallet { /// Period between successive spends. #[pallet::constant] - type SpendPeriod: Get>; + type SpendPeriod: Get>; /// Percentage of spare funds (if any) that are burnt per spend period. #[pallet::constant] @@ -277,14 +279,14 @@ pub mod pallet { /// The period during which an approved treasury spend has to be claimed. #[pallet::constant] - type PayoutPeriod: Get>; + type PayoutPeriod: Get>; /// Helper type for benchmarks. #[cfg(feature = "runtime-benchmarks")] type BenchmarkHelper: ArgumentsFactory; /// Provider for the block number. Normally this is the `frame_system` pallet. - type BlockNumberProvider: BlockNumberProvider>; + type BlockNumberProvider: BlockNumberProvider; } /// DEPRECATED: associated with `spend_local` call and will be removed in May 2025. @@ -335,7 +337,7 @@ pub mod pallet { T::AssetKind, AssetBalanceOf, T::Beneficiary, - BlockNumberFor, + BlockNumberFor, ::Id, >, OptionQuery, @@ -343,7 +345,7 @@ pub mod pallet { /// The blocknumber for the last triggered spend period. #[pallet::storage] - pub(crate) type LastSpendPeriod = StorageValue<_, BlockNumberFor, OptionQuery>; + pub(crate) type LastSpendPeriod = StorageValue<_, BlockNumberFor, OptionQuery>; #[pallet::genesis_config] #[derive(frame_support::DefaultNoBound)] @@ -391,8 +393,8 @@ pub mod pallet { asset_kind: T::AssetKind, amount: AssetBalanceOf, beneficiary: T::Beneficiary, - valid_from: BlockNumberFor, - expire_at: BlockNumberFor, + valid_from: BlockNumberFor, + expire_at: BlockNumberFor, }, /// An approved spend was voided. 
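With the treasury hunks above, the pallet takes its notion of time from `T::BlockNumberProvider` instead of `frame_system`, which lets a parachain drive spend periods off the relay-chain block number. A minimal hedged sketch of a custom provider; the fixed value is illustrative only:

use sp_runtime::traits::BlockNumberProvider;

pub struct FixedBlockNumberProvider;

impl BlockNumberProvider for FixedBlockNumberProvider {
	type BlockNumber = u32;

	fn current_block_number() -> Self::BlockNumber {
		// A real provider would read, e.g., the relay-chain block number here.
		42
	}
}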
AssetSpendVoided { index: SpendIndex }, @@ -434,10 +436,10 @@ pub mod pallet { } #[pallet::hooks] - impl, I: 'static> Hooks> for Pallet { + impl, I: 'static> Hooks> for Pallet { /// ## Complexity /// - `O(A)` where `A` is the number of approvals - fn on_initialize(_do_not_use_local_block_number: BlockNumberFor) -> Weight { + fn on_initialize(_do_not_use_local_block_number: SystemBlockNumberFor) -> Weight { let block_number = T::BlockNumberProvider::current_block_number(); let pot = Self::pot(); let deactivated = Deactivated::::get(); @@ -458,7 +460,7 @@ pub mod pallet { // empty. .unwrap_or_else(|| Self::update_last_spend_period()); let blocks_since_last_spend_period = block_number.saturating_sub(last_spend_period); - let safe_spend_period = T::SpendPeriod::get().max(BlockNumberFor::::one()); + let safe_spend_period = T::SpendPeriod::get().max(BlockNumberFor::::one()); // Safe because of `max(1)` above. let (spend_periods_passed, extra_blocks) = ( @@ -466,7 +468,7 @@ pub mod pallet { blocks_since_last_spend_period % safe_spend_period, ); let new_last_spend_period = block_number.saturating_sub(extra_blocks); - if spend_periods_passed > BlockNumberFor::::zero() { + if spend_periods_passed > BlockNumberFor::::zero() { Self::spend_funds(spend_periods_passed, new_last_spend_period) } else { Weight::zero() @@ -474,7 +476,7 @@ pub mod pallet { } #[cfg(feature = "try-runtime")] - fn try_state(_: BlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { + fn try_state(_: SystemBlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { Self::do_try_state()?; Ok(()) } @@ -638,7 +640,7 @@ pub mod pallet { asset_kind: Box, #[pallet::compact] amount: AssetBalanceOf, beneficiary: Box>, - valid_from: Option>, + valid_from: Option>, ) -> DispatchResult { let max_amount = T::SpendOrigin::ensure_origin(origin)?; let beneficiary = T::BeneficiaryLookup::lookup(*beneficiary)?; @@ -844,9 +846,9 @@ impl, I: 'static> Pallet { // Backfill the `LastSpendPeriod` storage, assuming that no configuration has changed // since introducing this code. Used specifically for a migration-less switch to populate // `LastSpendPeriod`. - fn update_last_spend_period() -> BlockNumberFor { + fn update_last_spend_period() -> BlockNumberFor { let block_number = T::BlockNumberProvider::current_block_number(); - let spend_period = T::SpendPeriod::get().max(BlockNumberFor::::one()); + let spend_period = T::SpendPeriod::get().max(BlockNumberFor::::one()); let time_since_last_spend = block_number % spend_period; // If it happens that this logic runs directly on a spend period block, we need to backdate // to the last spend period so a spend still occurs this block. @@ -889,8 +891,8 @@ impl, I: 'static> Pallet { /// Spend some money! returns number of approvals before spend. pub fn spend_funds( - spend_periods_passed: BlockNumberFor, - new_last_spend_period: BlockNumberFor, + spend_periods_passed: BlockNumberFor, + new_last_spend_period: BlockNumberFor, ) -> Weight { LastSpendPeriod::::put(new_last_spend_period); let mut total_weight = Weight::zero(); diff --git a/substrate/frame/tx-pause/src/mock.rs b/substrate/frame/tx-pause/src/mock.rs index 84ce45e83528..fd9b3b552ccd 100644 --- a/substrate/frame/tx-pause/src/mock.rs +++ b/substrate/frame/tx-pause/src/mock.rs @@ -105,6 +105,7 @@ impl pallet_proxy::Config for Test { type MaxPending = ConstU32<2>; type AnnouncementDepositBase = ConstU64<1>; type AnnouncementDepositFactor = ConstU64<1>; + type BlockNumberProvider = frame_system::Pallet; } parameter_types! 
{ diff --git a/substrate/primitives/api/proc-macro/src/runtime_metadata.rs b/substrate/primitives/api/proc-macro/src/runtime_metadata.rs index 6be396339259..1706f8ca6fbb 100644 --- a/substrate/primitives/api/proc-macro/src/runtime_metadata.rs +++ b/substrate/primitives/api/proc-macro/src/runtime_metadata.rs @@ -298,18 +298,14 @@ pub fn generate_impl_runtime_metadata(impls: &[ItemImpl]) -> Result #crate_::vec::Vec<#crate_::metadata_ir::RuntimeApiMetadataIR> { #crate_::vec![ #( #metadata, )* ] } } - #[doc(hidden)] - impl InternalImplRuntimeApis for #runtime_name {} } )) } diff --git a/substrate/primitives/api/test/Cargo.toml b/substrate/primitives/api/test/Cargo.toml index 1d21f23eb804..27f6dafa24bf 100644 --- a/substrate/primitives/api/test/Cargo.toml +++ b/substrate/primitives/api/test/Cargo.toml @@ -21,6 +21,7 @@ sp-version = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } +sp-metadata-ir = { workspace = true, default-features = true } sc-block-builder = { workspace = true, default-features = true } codec = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } @@ -40,5 +41,5 @@ name = "bench" harness = false [features] -"enable-staging-api" = [] +enable-staging-api = [] disable-ui-tests = [] diff --git a/substrate/primitives/api/test/tests/decl_and_impl.rs b/substrate/primitives/api/test/tests/decl_and_impl.rs index 890cf6eccdbc..2e5a078cb382 100644 --- a/substrate/primitives/api/test/tests/decl_and_impl.rs +++ b/substrate/primitives/api/test/tests/decl_and_impl.rs @@ -309,6 +309,8 @@ fn mock_runtime_api_works_with_advanced() { #[test] fn runtime_api_metadata_matches_version_implemented() { + use sp_metadata_ir::InternalImplRuntimeApis; + let rt = Runtime {}; let runtime_metadata = rt.runtime_metadata(); diff --git a/substrate/primitives/api/test/tests/runtime_calls.rs b/substrate/primitives/api/test/tests/runtime_calls.rs index 5a524d1c7f4d..0470b8b72aa0 100644 --- a/substrate/primitives/api/test/tests/runtime_calls.rs +++ b/substrate/primitives/api/test/tests/runtime_calls.rs @@ -99,8 +99,8 @@ fn record_proof_works() { let transaction = Transfer { amount: 1000, nonce: 0, - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Bob.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Bob.into(), } .into_unchecked_extrinsic(); diff --git a/substrate/primitives/keyring/src/lib.rs b/substrate/primitives/keyring/src/lib.rs index 008c01b150f0..36e77dabd601 100644 --- a/substrate/primitives/keyring/src/lib.rs +++ b/substrate/primitives/keyring/src/lib.rs @@ -32,20 +32,11 @@ pub mod ed25519; #[cfg(feature = "bandersnatch-experimental")] pub mod bandersnatch; -/// Convenience export: Sr25519's Keyring is exposed as `AccountKeyring`, since it tends to be -/// used for accounts (although it may also be used by authorities). -pub use sr25519::Keyring as AccountKeyring; - #[cfg(feature = "bandersnatch-experimental")] pub use bandersnatch::Keyring as BandersnatchKeyring; pub use ed25519::Keyring as Ed25519Keyring; pub use sr25519::Keyring as Sr25519Keyring; -pub mod test { - /// The keyring for use with accounts when using the test runtime. - pub use super::ed25519::Keyring as AccountKeyring; -} - #[derive(Debug)] /// Represents an error that occurs when parsing a string into a `KeyRing`. 
pub struct ParseKeyringError; diff --git a/substrate/primitives/metadata-ir/Cargo.toml b/substrate/primitives/metadata-ir/Cargo.toml index d7786347dd02..046441104b88 100644 --- a/substrate/primitives/metadata-ir/Cargo.toml +++ b/substrate/primitives/metadata-ir/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } -frame-metadata = { features = ["current"], workspace = true } +frame-metadata = { features = ["current", "unstable"], workspace = true } scale-info = { features = ["derive"], workspace = true } [features] diff --git a/substrate/primitives/metadata-ir/src/lib.rs b/substrate/primitives/metadata-ir/src/lib.rs index 4bd13b935afd..dc01f7eaadb3 100644 --- a/substrate/primitives/metadata-ir/src/lib.rs +++ b/substrate/primitives/metadata-ir/src/lib.rs @@ -30,6 +30,7 @@ mod types; use frame_metadata::RuntimeMetadataPrefixed; pub use types::*; +mod unstable; mod v14; mod v15; @@ -39,23 +40,33 @@ const V14: u32 = 14; /// Metadata V15. const V15: u32 = 15; +/// Unstable metadata V16. +const UNSTABLE_V16: u32 = u32::MAX; + /// Transform the IR to the specified version. /// /// Use [`supported_versions`] to find supported versions. pub fn into_version(metadata: MetadataIR, version: u32) -> Option { // Note: Unstable metadata version is `u32::MAX` until stabilized. match version { - // Latest stable version. + // Version V14. This needs to be around until the + // deprecation of the `Metadata_metadata` runtime call in favor of + // `Metadata_metadata_at_version`. V14 => Some(into_v14(metadata)), - // Unstable metadata. + + // Version V15 - latest stable. V15 => Some(into_latest(metadata)), + + // Unstable metadata under `u32::MAX`. + UNSTABLE_V16 => Some(into_unstable(metadata)), + _ => None, } } /// Returns the supported metadata versions. pub fn supported_versions() -> alloc::vec::Vec { - alloc::vec![V14, V15] + alloc::vec![V14, V15, UNSTABLE_V16] } /// Transform the IR to the latest stable metadata version. @@ -70,6 +81,22 @@ pub fn into_v14(metadata: MetadataIR) -> RuntimeMetadataPrefixed { latest.into() } +/// Transform the IR to unstable metadata version 16. +pub fn into_unstable(metadata: MetadataIR) -> RuntimeMetadataPrefixed { + let latest: frame_metadata::v16::RuntimeMetadataV16 = metadata.into(); + latest.into() +} + +/// INTERNAL USE ONLY +/// +/// Special trait that is used together with `InternalConstructRuntime` by `construct_runtime!` to +/// fetch the runtime api metadata without exploding when there is no runtime api implementation +/// available. +#[doc(hidden)] +pub trait InternalImplRuntimeApis { + fn runtime_metadata(&self) -> alloc::vec::Vec; +} + #[cfg(test)] mod test { use super::*; @@ -81,7 +108,7 @@ mod test { pallets: vec![], extrinsic: ExtrinsicMetadataIR { ty: meta_type::<()>(), - version: 0, + versions: vec![0], address_ty: meta_type::<()>(), call_ty: meta_type::<()>(), signature_ty: meta_type::<()>(), diff --git a/substrate/primitives/metadata-ir/src/types.rs b/substrate/primitives/metadata-ir/src/types.rs index 199b692fbd8c..af217ffe16ee 100644 --- a/substrate/primitives/metadata-ir/src/types.rs +++ b/substrate/primitives/metadata-ir/src/types.rs @@ -170,8 +170,8 @@ pub struct ExtrinsicMetadataIR { /// /// Note: Field used for metadata V14 only. pub ty: T::Type, - /// Extrinsic version. - pub version: u8, + /// Extrinsic versions. + pub versions: Vec, /// The type of the address that signs the extrinsic pub address_ty: T::Type, /// The type of the outermost Call enum. 
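For illustration, the `into_version` dispatch above implies the following version-probing behaviour (a minimal sketch, not part of this diff; it assumes the `sp_metadata_ir` crate is in scope):

// Probe version support without materializing any metadata.
fn is_supported(version: u32) -> bool {
    sp_metadata_ir::supported_versions().contains(&version)
}

// `is_supported(14)`, `is_supported(15)` and `is_supported(u32::MAX)` now all
// hold, while e.g. `is_supported(16)` stays false until V16 is stabilized
// under its own version number.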
@@ -191,7 +191,7 @@ impl IntoPortable for ExtrinsicMetadataIR { fn into_portable(self, registry: &mut Registry) -> Self::Output { ExtrinsicMetadataIR { ty: registry.register_type(&self.ty), - version: self.version, + versions: self.versions, address_ty: registry.register_type(&self.address_ty), call_ty: registry.register_type(&self.call_ty), signature_ty: registry.register_type(&self.signature_ty), diff --git a/substrate/primitives/metadata-ir/src/unstable.rs b/substrate/primitives/metadata-ir/src/unstable.rs new file mode 100644 index 000000000000..d46ce3ec6a7d --- /dev/null +++ b/substrate/primitives/metadata-ir/src/unstable.rs @@ -0,0 +1,211 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Convert the IR to V16 metadata. + +use crate::{ + DeprecationInfoIR, DeprecationStatusIR, OuterEnumsIR, PalletAssociatedTypeMetadataIR, + PalletCallMetadataIR, PalletConstantMetadataIR, PalletErrorMetadataIR, PalletEventMetadataIR, + PalletStorageMetadataIR, StorageEntryMetadataIR, +}; + +use super::types::{ + ExtrinsicMetadataIR, MetadataIR, PalletMetadataIR, RuntimeApiMetadataIR, + RuntimeApiMethodMetadataIR, RuntimeApiMethodParamMetadataIR, TransactionExtensionMetadataIR, +}; + +use frame_metadata::v16::{ + CustomMetadata, DeprecationInfo, DeprecationStatus, ExtrinsicMetadata, OuterEnums, + PalletAssociatedTypeMetadata, PalletCallMetadata, PalletConstantMetadata, PalletErrorMetadata, + PalletEventMetadata, PalletMetadata, PalletStorageMetadata, RuntimeApiMetadata, + RuntimeApiMethodMetadata, RuntimeApiMethodParamMetadata, RuntimeMetadataV16, + StorageEntryMetadata, TransactionExtensionMetadata, +}; + +impl From for RuntimeMetadataV16 { + fn from(ir: MetadataIR) -> Self { + RuntimeMetadataV16::new( + ir.pallets.into_iter().map(Into::into).collect(), + ir.extrinsic.into(), + ir.apis.into_iter().map(Into::into).collect(), + ir.outer_enums.into(), + // Substrate does not yet collect the custom metadata fields. + // This allows us to extend V16 easily. 
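+ // Consumers should therefore expect an empty `custom` map for now.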
+ CustomMetadata { map: Default::default() }, + ) + } +} + +impl From for RuntimeApiMetadata { + fn from(ir: RuntimeApiMetadataIR) -> Self { + RuntimeApiMetadata { + name: ir.name, + methods: ir.methods.into_iter().map(Into::into).collect(), + docs: ir.docs, + deprecation_info: ir.deprecation_info.into(), + } + } +} + +impl From for RuntimeApiMethodMetadata { + fn from(ir: RuntimeApiMethodMetadataIR) -> Self { + RuntimeApiMethodMetadata { + name: ir.name, + inputs: ir.inputs.into_iter().map(Into::into).collect(), + output: ir.output, + docs: ir.docs, + deprecation_info: ir.deprecation_info.into(), + } + } +} + +impl From for RuntimeApiMethodParamMetadata { + fn from(ir: RuntimeApiMethodParamMetadataIR) -> Self { + RuntimeApiMethodParamMetadata { name: ir.name, ty: ir.ty } + } +} + +impl From for PalletMetadata { + fn from(ir: PalletMetadataIR) -> Self { + PalletMetadata { + name: ir.name, + storage: ir.storage.map(Into::into), + calls: ir.calls.map(Into::into), + event: ir.event.map(Into::into), + constants: ir.constants.into_iter().map(Into::into).collect(), + error: ir.error.map(Into::into), + index: ir.index, + docs: ir.docs, + associated_types: ir.associated_types.into_iter().map(Into::into).collect(), + deprecation_info: ir.deprecation_info.into(), + } + } +} + +impl From for PalletStorageMetadata { + fn from(ir: PalletStorageMetadataIR) -> Self { + PalletStorageMetadata { + prefix: ir.prefix, + entries: ir.entries.into_iter().map(Into::into).collect(), + } + } +} + +impl From for StorageEntryMetadata { + fn from(ir: StorageEntryMetadataIR) -> Self { + StorageEntryMetadata { + name: ir.name, + modifier: ir.modifier.into(), + ty: ir.ty.into(), + default: ir.default, + docs: ir.docs, + deprecation_info: ir.deprecation_info.into(), + } + } +} + +impl From for PalletAssociatedTypeMetadata { + fn from(ir: PalletAssociatedTypeMetadataIR) -> Self { + PalletAssociatedTypeMetadata { name: ir.name, ty: ir.ty, docs: ir.docs } + } +} + +impl From for PalletErrorMetadata { + fn from(ir: PalletErrorMetadataIR) -> Self { + PalletErrorMetadata { ty: ir.ty, deprecation_info: ir.deprecation_info.into() } + } +} + +impl From for PalletEventMetadata { + fn from(ir: PalletEventMetadataIR) -> Self { + PalletEventMetadata { ty: ir.ty, deprecation_info: ir.deprecation_info.into() } + } +} + +impl From for PalletCallMetadata { + fn from(ir: PalletCallMetadataIR) -> Self { + PalletCallMetadata { ty: ir.ty, deprecation_info: ir.deprecation_info.into() } + } +} + +impl From for PalletConstantMetadata { + fn from(ir: PalletConstantMetadataIR) -> Self { + PalletConstantMetadata { + name: ir.name, + ty: ir.ty, + value: ir.value, + docs: ir.docs, + deprecation_info: ir.deprecation_info.into(), + } + } +} + +impl From for TransactionExtensionMetadata { + fn from(ir: TransactionExtensionMetadataIR) -> Self { + TransactionExtensionMetadata { identifier: ir.identifier, ty: ir.ty, implicit: ir.implicit } + } +} + +impl From for ExtrinsicMetadata { + fn from(ir: ExtrinsicMetadataIR) -> Self { + // Assume version 0 for all extensions. 
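+ // That is: all transaction extension indexes are grouped under a single
+ // version-0 entry in `transaction_extensions_by_version`, because the IR
+ // does not yet track per-extension versions.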
+ let indexes = (0..ir.extensions.len()).map(|index| index as u32).collect(); + let transaction_extensions_by_version = [(0, indexes)].iter().cloned().collect(); + + ExtrinsicMetadata { + versions: ir.versions, + address_ty: ir.address_ty, + signature_ty: ir.signature_ty, + transaction_extensions_by_version, + transaction_extensions: ir.extensions.into_iter().map(Into::into).collect(), + } + } +} + +impl From for OuterEnums { + fn from(ir: OuterEnumsIR) -> Self { + OuterEnums { + call_enum_ty: ir.call_enum_ty, + event_enum_ty: ir.event_enum_ty, + error_enum_ty: ir.error_enum_ty, + } + } +} + +impl From for DeprecationStatus { + fn from(ir: DeprecationStatusIR) -> Self { + match ir { + DeprecationStatusIR::NotDeprecated => DeprecationStatus::NotDeprecated, + DeprecationStatusIR::DeprecatedWithoutNote => DeprecationStatus::DeprecatedWithoutNote, + DeprecationStatusIR::Deprecated { since, note } => + DeprecationStatus::Deprecated { since, note }, + } + } +} + +impl From for DeprecationInfo { + fn from(ir: DeprecationInfoIR) -> Self { + match ir { + DeprecationInfoIR::NotDeprecated => DeprecationInfo::NotDeprecated, + DeprecationInfoIR::ItemDeprecated(status) => + DeprecationInfo::ItemDeprecated(status.into()), + DeprecationInfoIR::VariantsDeprecated(btree) => DeprecationInfo::VariantsDeprecated( + btree.into_iter().map(|(key, value)| (key.0, value.into())).collect(), + ), + } + } +} diff --git a/substrate/primitives/metadata-ir/src/v14.rs b/substrate/primitives/metadata-ir/src/v14.rs index 70e84532add9..f3cb5973f5bd 100644 --- a/substrate/primitives/metadata-ir/src/v14.rs +++ b/substrate/primitives/metadata-ir/src/v14.rs @@ -149,9 +149,12 @@ impl From for SignedExtensionMetadata { impl From for ExtrinsicMetadata { fn from(ir: ExtrinsicMetadataIR) -> Self { + let lowest_supported_version = + ir.versions.iter().min().expect("Metadata V14 supports one version; qed"); + ExtrinsicMetadata { ty: ir.ty, - version: ir.version, + version: *lowest_supported_version, signed_extensions: ir.extensions.into_iter().map(Into::into).collect(), } } diff --git a/substrate/primitives/metadata-ir/src/v15.rs b/substrate/primitives/metadata-ir/src/v15.rs index 4b3b6106d27f..ed315a31e6dc 100644 --- a/substrate/primitives/metadata-ir/src/v15.rs +++ b/substrate/primitives/metadata-ir/src/v15.rs @@ -100,7 +100,7 @@ impl From for SignedExtensionMetadata { impl From for ExtrinsicMetadata { fn from(ir: ExtrinsicMetadataIR) -> Self { ExtrinsicMetadata { - version: ir.version, + version: *ir.versions.iter().min().expect("Metadata V15 supports only one version"), address_ty: ir.address_ty, call_ty: ir.call_ty, signature_ty: ir.signature_ty, diff --git a/substrate/primitives/panic-handler/src/lib.rs b/substrate/primitives/panic-handler/src/lib.rs index c4a7eb8dc67c..81ccaaee828e 100644 --- a/substrate/primitives/panic-handler/src/lib.rs +++ b/substrate/primitives/panic-handler/src/lib.rs @@ -30,7 +30,7 @@ use std::{ cell::Cell, io::{self, Write}, marker::PhantomData, - panic::{self, PanicInfo}, + panic::{self, PanicHookInfo}, sync::LazyLock, thread, }; @@ -149,7 +149,7 @@ fn strip_control_codes(input: &str) -> std::borrow::Cow { } /// Function being called when a panic happens. 
-fn panic_hook(info: &PanicInfo, report_url: &str, version: &str) { +fn panic_hook(info: &PanicHookInfo, report_url: &str, version: &str) { let location = info.location(); let file = location.as_ref().map(|l| l.file()).unwrap_or(""); let line = location.as_ref().map(|l| l.line()).unwrap_or(0); diff --git a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs index 91ba37451909..d8510a60a789 100644 --- a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs +++ b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs @@ -389,8 +389,7 @@ where impl> ExtrinsicMetadata for UncheckedExtrinsic { - // TODO: Expose both version 4 and version 5 in metadata v16. - const VERSION: u8 = LEGACY_EXTRINSIC_FORMAT_VERSION; + const VERSIONS: &'static [u8] = &[LEGACY_EXTRINSIC_FORMAT_VERSION, EXTRINSIC_FORMAT_VERSION]; type TransactionExtensions = Extension; } diff --git a/substrate/primitives/runtime/src/traits/mod.rs b/substrate/primitives/runtime/src/traits/mod.rs index 01bdcca86b6f..cfcc3e5a354d 100644 --- a/substrate/primitives/runtime/src/traits/mod.rs +++ b/substrate/primitives/runtime/src/traits/mod.rs @@ -1410,10 +1410,10 @@ impl SignaturePayload for () { /// Implementor is an [`Extrinsic`] and provides metadata about this extrinsic. pub trait ExtrinsicMetadata { - /// The format version of the `Extrinsic`. + /// The format versions of the `Extrinsic`. /// - /// By format is meant the encoded representation of the `Extrinsic`. - const VERSION: u8; + /// By format we mean the encoded representation of the `Extrinsic`. + const VERSIONS: &'static [u8]; /// Transaction extensions attached to this `Extrinsic`. type TransactionExtensions; @@ -2349,7 +2349,8 @@ pub trait BlockNumberProvider { + TypeInfo + Debug + MaxEncodedLen - + Copy; + + Copy + + EncodeLike; /// Returns the current block number. /// diff --git a/substrate/primitives/runtime/src/type_with_default.rs b/substrate/primitives/runtime/src/type_with_default.rs index 5790e3ab6bf6..b0eca22e5c1a 100644 --- a/substrate/primitives/runtime/src/type_with_default.rs +++ b/substrate/primitives/runtime/src/type_with_default.rs @@ -31,7 +31,7 @@ use num_traits::{ CheckedAdd, CheckedDiv, CheckedMul, CheckedNeg, CheckedRem, CheckedShl, CheckedShr, CheckedSub, Num, NumCast, PrimInt, Saturating, ToPrimitive, }; -use scale_info::TypeInfo; +use scale_info::{StaticTypeInfo, TypeInfo}; use sp_core::Get; #[cfg(feature = "serde")] @@ -40,7 +40,8 @@ use serde::{Deserialize, Serialize}; /// A type that wraps another type and provides a default value. /// /// Passes through arithmetical and many other operations to the inner value. -#[derive(Encode, Decode, TypeInfo, Debug, MaxEncodedLen)] +/// Type information for metadata is the same as the inner value's type. +#[derive(Encode, Decode, Debug, MaxEncodedLen)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct TypeWithDefault>(T, PhantomData); @@ -50,6 +51,17 @@ impl> TypeWithDefault { } } +// Hides implementation details from the outside (for metadata type information). +// +// The type info shown in the metadata is that of the inner value's type. 
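+// (The tests at the bottom of this file assert exactly this, e.g.
+// `U64WithDefault::type_info()` equals `u64`'s `type_info()`.)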
+impl + 'static> TypeInfo for TypeWithDefault { + type Identity = Self; + + fn type_info() -> scale_info::Type { + T::type_info() + } +} + impl> Clone for TypeWithDefault { fn clone(&self) -> Self { Self(self.0.clone(), PhantomData) @@ -511,6 +523,7 @@ impl> CompactAs for TypeWithDefault { #[cfg(test)] mod tests { use super::TypeWithDefault; + use scale_info::TypeInfo; use sp_arithmetic::traits::{AtLeast16Bit, AtLeast32Bit, AtLeast8Bit}; use sp_core::Get; @@ -565,5 +578,11 @@ mod tests { } type U128WithDefault = TypeWithDefault; impl WrapAtLeast32Bit for U128WithDefault {} + + assert_eq!(U8WithDefault::type_info(), ::type_info()); + assert_eq!(U16WithDefault::type_info(), ::type_info()); + assert_eq!(U32WithDefault::type_info(), ::type_info()); + assert_eq!(U64WithDefault::type_info(), ::type_info()); + assert_eq!(U128WithDefault::type_info(), ::type_info()); } } diff --git a/substrate/test-utils/client/Cargo.toml b/substrate/test-utils/client/Cargo.toml index ebd1eab5980d..a67c91fc5f79 100644 --- a/substrate/test-utils/client/Cargo.toml +++ b/substrate/test-utils/client/Cargo.toml @@ -29,9 +29,7 @@ sc-client-db = { features = [ sc-consensus = { workspace = true, default-features = true } sc-executor = { workspace = true, default-features = true } sc-offchain = { workspace = true, default-features = true } -sc-service = { features = [ - "test-helpers", -], workspace = true } +sc-service = { workspace = true } sp-blockchain = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } diff --git a/substrate/test-utils/client/src/lib.rs b/substrate/test-utils/client/src/lib.rs index c07640653d56..5a4e6c911694 100644 --- a/substrate/test-utils/client/src/lib.rs +++ b/substrate/test-utils/client/src/lib.rs @@ -27,9 +27,7 @@ pub use sc_client_db::{self, Backend, BlocksPruning}; pub use sc_executor::{self, WasmExecutionMethod, WasmExecutor}; pub use sc_service::{client, RpcHandlers}; pub use sp_consensus; -pub use sp_keyring::{ - ed25519::Keyring as Ed25519Keyring, sr25519::Keyring as Sr25519Keyring, AccountKeyring, -}; +pub use sp_keyring::{Ed25519Keyring, Sr25519Keyring}; pub use sp_keystore::{Keystore, KeystorePtr}; pub use sp_runtime::{Storage, StorageChild}; diff --git a/substrate/test-utils/runtime/Cargo.toml b/substrate/test-utils/runtime/Cargo.toml index 1c82c73072bc..96a888052876 100644 --- a/substrate/test-utils/runtime/Cargo.toml +++ b/substrate/test-utils/runtime/Cargo.toml @@ -45,7 +45,7 @@ sp-consensus-grandpa = { features = ["serde"], workspace = true } sp-trie = { workspace = true } sp-transaction-pool = { workspace = true } trie-db = { workspace = true } -sc-service = { features = ["test-helpers"], optional = true, workspace = true } +sc-service = { optional = true, workspace = true } sp-state-machine = { workspace = true } sp-externalities = { workspace = true } diff --git a/substrate/test-utils/runtime/client/src/lib.rs b/substrate/test-utils/runtime/client/src/lib.rs index 435f3f5ebacb..a5a37660660c 100644 --- a/substrate/test-utils/runtime/client/src/lib.rs +++ b/substrate/test-utils/runtime/client/src/lib.rs @@ -45,7 +45,7 @@ pub mod prelude { Backend, ExecutorDispatch, TestClient, TestClientBuilder, WasmExecutionMethod, }; // Keyring - pub use super::{AccountKeyring, Sr25519Keyring}; + pub use super::Sr25519Keyring; } /// Test client database backend. 
diff --git a/substrate/test-utils/runtime/client/src/trait_tests.rs b/substrate/test-utils/runtime/client/src/trait_tests.rs index c3a5f173d14e..815e05163281 100644 --- a/substrate/test-utils/runtime/client/src/trait_tests.rs +++ b/substrate/test-utils/runtime/client/src/trait_tests.rs @@ -23,7 +23,7 @@ use std::sync::Arc; use crate::{ - AccountKeyring, BlockBuilderExt, ClientBlockImportExt, TestClientBuilder, TestClientBuilderExt, + BlockBuilderExt, ClientBlockImportExt, Sr25519Keyring, TestClientBuilder, TestClientBuilderExt, }; use futures::executor::block_on; use sc_block_builder::BlockBuilderBuilder; @@ -132,8 +132,8 @@ where // this push is required as otherwise B2 has the same hash as A2 and won't get imported builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 41, nonce: 0, }) @@ -179,8 +179,8 @@ where // this push is required as otherwise C3 has the same hash as B3 and won't get imported builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 1, nonce: 1, }) @@ -199,8 +199,8 @@ where // this push is required as otherwise D2 has the same hash as B2 and won't get imported builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 1, nonce: 0, }) @@ -295,8 +295,8 @@ where // this push is required as otherwise B2 has the same hash as A2 and won't get imported builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 41, nonce: 0, }) @@ -338,8 +338,8 @@ where // this push is required as otherwise C3 has the same hash as B3 and won't get imported builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 1, nonce: 1, }) @@ -357,8 +357,8 @@ where // this push is required as otherwise D2 has the same hash as B2 and won't get imported builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 1, nonce: 0, }) @@ -464,8 +464,8 @@ where // this push is required as otherwise B2 has the same hash as A2 and won't get imported builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 41, nonce: 0, }) @@ -507,8 +507,8 @@ where // this push is required as otherwise C3 has the same hash as B3 and won't get imported builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 1, nonce: 1, }) @@ -526,8 +526,8 @@ where // this push is required as otherwise D2 has the same hash as B2 and won't get imported builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 1, nonce: 0, }) diff --git 
a/substrate/test-utils/runtime/src/extrinsic.rs b/substrate/test-utils/runtime/src/extrinsic.rs index 4c884d4174ff..491086bef497 100644 --- a/substrate/test-utils/runtime/src/extrinsic.rs +++ b/substrate/test-utils/runtime/src/extrinsic.rs @@ -25,7 +25,7 @@ use codec::Encode; use frame_metadata_hash_extension::CheckMetadataHash; use frame_system::{CheckNonce, CheckWeight}; use sp_core::crypto::Pair as TraitPair; -use sp_keyring::AccountKeyring; +use sp_keyring::Sr25519Keyring; use sp_runtime::{ generic::Preamble, traits::TransactionExtension, transaction_validity::TransactionPriority, Perbill, @@ -54,8 +54,8 @@ impl Transfer { impl Default for TransferData { fn default() -> Self { Self { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Bob.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Bob.into(), amount: 0, nonce: 0, } @@ -93,7 +93,7 @@ impl ExtrinsicBuilder { pub fn new(function: impl Into) -> Self { Self { function: function.into(), - signer: Some(AccountKeyring::Alice.pair()), + signer: Some(Sr25519Keyring::Alice.pair()), nonce: None, metadata_hash: None, } diff --git a/substrate/test-utils/runtime/src/genesismap.rs b/substrate/test-utils/runtime/src/genesismap.rs index 9e972886b377..5c0c146d45a5 100644 --- a/substrate/test-utils/runtime/src/genesismap.rs +++ b/substrate/test-utils/runtime/src/genesismap.rs @@ -27,7 +27,7 @@ use sp_core::{ storage::{well_known_keys, StateVersion, Storage}, Pair, }; -use sp_keyring::{AccountKeyring, Sr25519Keyring}; +use sp_keyring::Sr25519Keyring; use sp_runtime::{ traits::{Block as BlockT, Hash as HashT, Header as HeaderT}, BuildStorage, @@ -60,11 +60,11 @@ impl Default for GenesisStorageBuilder { ], (0..16_usize) .into_iter() - .map(|i| AccountKeyring::numeric(i).public()) + .map(|i| Sr25519Keyring::numeric(i).public()) .chain(vec![ - AccountKeyring::Alice.into(), - AccountKeyring::Bob.into(), - AccountKeyring::Charlie.into(), + Sr25519Keyring::Alice.into(), + Sr25519Keyring::Bob.into(), + Sr25519Keyring::Charlie.into(), ]) .collect(), 1000 * currency::DOLLARS, diff --git a/substrate/test-utils/runtime/src/lib.rs b/substrate/test-utils/runtime/src/lib.rs index 461d583b5cc6..666776865316 100644 --- a/substrate/test-utils/runtime/src/lib.rs +++ b/substrate/test-utils/runtime/src/lib.rs @@ -47,7 +47,7 @@ use frame_system::{ }; use scale_info::TypeInfo; use sp_application_crypto::Ss58Codec; -use sp_keyring::AccountKeyring; +use sp_keyring::Sr25519Keyring; use sp_application_crypto::{ecdsa, ed25519, sr25519, RuntimeAppPublic}; use sp_core::{OpaqueMetadata, RuntimeDebug}; @@ -731,8 +731,8 @@ impl_runtime_apis! { let patch = match name.as_ref() { "staging" => { let endowed_accounts: Vec = vec![ - AccountKeyring::Bob.public().into(), - AccountKeyring::Charlie.public().into(), + Sr25519Keyring::Bob.public().into(), + Sr25519Keyring::Charlie.public().into(), ]; json!({ @@ -741,8 +741,8 @@ impl_runtime_apis! 
{ }, "substrateTest": { "authorities": [ - AccountKeyring::Alice.public().to_ss58check(), - AccountKeyring::Ferdie.public().to_ss58check() + Sr25519Keyring::Alice.public().to_ss58check(), + Sr25519Keyring::Ferdie.public().to_ss58check() ], } }) @@ -911,11 +911,11 @@ pub mod storage_key_generator { let balances_map_keys = (0..16_usize) .into_iter() - .map(|i| AccountKeyring::numeric(i).public().to_vec()) + .map(|i| Sr25519Keyring::numeric(i).public().to_vec()) .chain(vec![ - AccountKeyring::Alice.public().to_vec(), - AccountKeyring::Bob.public().to_vec(), - AccountKeyring::Charlie.public().to_vec(), + Sr25519Keyring::Alice.public().to_vec(), + Sr25519Keyring::Bob.public().to_vec(), + Sr25519Keyring::Charlie.public().to_vec(), ]) .map(|pubkey| { sp_crypto_hashing::blake2_128(&pubkey) @@ -1133,8 +1133,8 @@ mod tests { pub fn new_test_ext() -> sp_io::TestExternalities { genesismap::GenesisStorageBuilder::new( - vec![AccountKeyring::One.public().into(), AccountKeyring::Two.public().into()], - vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], + vec![Sr25519Keyring::One.public().into(), Sr25519Keyring::Two.public().into()], + vec![Sr25519Keyring::One.into(), Sr25519Keyring::Two.into()], 1000 * currency::DOLLARS, ) .build() @@ -1202,7 +1202,7 @@ mod tests { fn check_substrate_check_signed_extension_works() { sp_tracing::try_init_simple(); new_test_ext().execute_with(|| { - let x = AccountKeyring::Alice.into(); + let x = Sr25519Keyring::Alice.into(); let info = DispatchInfo::default(); let len = 0_usize; assert_eq!( @@ -1472,8 +1472,8 @@ mod tests { }, "substrateTest": { "authorities": [ - AccountKeyring::Ferdie.public().to_ss58check(), - AccountKeyring::Alice.public().to_ss58check() + Sr25519Keyring::Ferdie.public().to_ss58check(), + Sr25519Keyring::Alice.public().to_ss58check() ], } }); @@ -1502,8 +1502,8 @@ mod tests { let authority_key_vec = Vec::::decode(&mut &value[..]).unwrap(); assert_eq!(authority_key_vec.len(), 2); - assert_eq!(authority_key_vec[0], AccountKeyring::Ferdie.public()); - assert_eq!(authority_key_vec[1], AccountKeyring::Alice.public()); + assert_eq!(authority_key_vec[0], Sr25519Keyring::Ferdie.public()); + assert_eq!(authority_key_vec[1], Sr25519Keyring::Alice.public()); //Babe|Authorities let value: Vec = get_from_storage( diff --git a/substrate/test-utils/runtime/transaction-pool/src/lib.rs b/substrate/test-utils/runtime/transaction-pool/src/lib.rs index 6a4f38f63e82..93e5855eefc6 100644 --- a/substrate/test-utils/runtime/transaction-pool/src/lib.rs +++ b/substrate/test-utils/runtime/transaction-pool/src/lib.rs @@ -43,7 +43,7 @@ use substrate_test_runtime_client::{ AccountId, Block, BlockNumber, Extrinsic, ExtrinsicBuilder, Hash, Header, Nonce, Transfer, TransferData, }, - AccountKeyring::{self, *}, + Sr25519Keyring::{self, *}, }; /// Error type used by [`TestApi`]. @@ -338,7 +338,7 @@ trait TagFrom { impl TagFrom for AccountId { fn tag_from(&self) -> u8 { - let f = AccountKeyring::iter().enumerate().find(|k| AccountId::from(k.1) == *self); + let f = Sr25519Keyring::iter().enumerate().find(|k| AccountId::from(k.1) == *self); u8::try_from(f.unwrap().0).unwrap() } } @@ -534,7 +534,7 @@ impl sp_blockchain::HeaderMetadata for TestApi { /// Generate transfer extrinsic with a given nonce. /// /// Part of the test api. 
-pub fn uxt(who: AccountKeyring, nonce: Nonce) -> Extrinsic { +pub fn uxt(who: Sr25519Keyring, nonce: Nonce) -> Extrinsic { let dummy = codec::Decode::decode(&mut TrailingZeroInput::zeroes()).unwrap(); let transfer = Transfer { from: who.into(), to: dummy, nonce, amount: 1 }; ExtrinsicBuilder::new_transfer(transfer).build() diff --git a/substrate/utils/frame/benchmarking-cli/Cargo.toml b/substrate/utils/frame/benchmarking-cli/Cargo.toml index 8a4a06b1b40a..6d86346ee180 100644 --- a/substrate/utils/frame/benchmarking-cli/Cargo.toml +++ b/substrate/utils/frame/benchmarking-cli/Cargo.toml @@ -44,6 +44,7 @@ sc-executor = { workspace = true, default-features = true } sc-executor-common = { workspace = true } sc-service = { workspace = true } sc-sysinfo = { workspace = true, default-features = true } +sc-runtime-utilities = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } diff --git a/substrate/utils/frame/benchmarking-cli/src/lib.rs b/substrate/utils/frame/benchmarking-cli/src/lib.rs index 1e8642e54d70..e1c3c5fe3706 100644 --- a/substrate/utils/frame/benchmarking-cli/src/lib.rs +++ b/substrate/utils/frame/benchmarking-cli/src/lib.rs @@ -30,7 +30,6 @@ pub use extrinsic::{ExtrinsicBuilder, ExtrinsicCmd, ExtrinsicFactory}; pub use machine::{MachineCmd, SUBSTRATE_REFERENCE_HARDWARE}; pub use overhead::{ remark_builder::{DynamicRemarkBuilder, SubstrateRemarkBuilder}, - runtime_utilities::fetch_latest_metadata_from_code_blob, OpaqueBlock, OverheadCmd, }; pub use pallet::PalletCmd; diff --git a/substrate/utils/frame/benchmarking-cli/src/overhead/command.rs b/substrate/utils/frame/benchmarking-cli/src/overhead/command.rs index 8102f14b4f4b..8df8ee5464f7 100644 --- a/substrate/utils/frame/benchmarking-cli/src/overhead/command.rs +++ b/substrate/utils/frame/benchmarking-cli/src/overhead/command.rs @@ -18,7 +18,6 @@ //! Contains the [`OverheadCmd`] as entry point for the CLI to execute //! the *overhead* benchmarks. 
-use super::runtime_utilities::*; use crate::{ extrinsic::{ bench::{Benchmark, BenchmarkParams as ExtrinsicBenchmarkParams}, @@ -37,7 +36,7 @@ use crate::{ }, }; use clap::{error::ErrorKind, Args, CommandFactory, Parser}; -use codec::Encode; +use codec::{Decode, Encode}; use cumulus_client_parachain_inherent::MockValidationDataInherentDataProvider; use fake_runtime_api::RuntimeApi as FakeRuntimeApi; use frame_support::Deserialize; @@ -50,6 +49,7 @@ use sc_cli::{CliConfiguration, Database, ImportParams, Result, SharedParams}; use sc_client_api::{execution_extensions::ExecutionExtensions, UsageProvider}; use sc_client_db::{BlocksPruning, DatabaseSettings}; use sc_executor::WasmExecutor; +use sc_runtime_utilities::fetch_latest_metadata_from_code_blob; use sc_service::{new_client, new_db_backend, BasePath, ClientConfig, TFullClient, TaskManager}; use serde::Serialize; use serde_json::{json, Value}; @@ -317,7 +317,7 @@ impl OverheadCmd { Some(self.params.genesis_builder_preset.clone()), ), self.params.para_id, - )) + )); }; Err("Neither a runtime nor a chain-spec were specified".to_string().into()) @@ -335,7 +335,7 @@ impl OverheadCmd { ErrorKind::MissingRequiredArgument, "Provide either a runtime via `--runtime` or a chain spec via `--chain`" .to_string(), - )) + )); } match self.params.genesis_builder { @@ -344,7 +344,7 @@ impl OverheadCmd { return Err(( ErrorKind::MissingRequiredArgument, "Provide a chain spec via `--chain`.".to_string(), - )) + )); }, _ => {}, }; @@ -400,8 +400,12 @@ impl OverheadCmd { .with_allow_missing_host_functions(true) .build(); - let metadata = - fetch_latest_metadata_from_code_blob(&executor, state_handler.get_code_bytes()?)?; + let opaque_metadata = + fetch_latest_metadata_from_code_blob(&executor, state_handler.get_code_bytes()?) + .map_err(|_| { + <&str as Into>::into("Unable to fetch latest stable metadata") + })?; + let metadata = subxt::Metadata::decode(&mut (*opaque_metadata).as_slice())?; // At this point we know what kind of chain we are dealing with. 
let chain_type = identify_chain(&metadata, para_id); @@ -682,6 +686,7 @@ mod tests { OverheadCmd, }; use clap::Parser; + use codec::Decode; use sc_executor::WasmExecutor; #[test] @@ -690,8 +695,9 @@ mod tests { let code_bytes = westend_runtime::WASM_BINARY .expect("To run this test, build the wasm binary of westend-runtime") .to_vec(); - let metadata = + let opaque_metadata = super::fetch_latest_metadata_from_code_blob(&executor, code_bytes.into()).unwrap(); + let metadata = subxt::Metadata::decode(&mut (*opaque_metadata).as_slice()).unwrap(); let chain_type = identify_chain(&metadata, None); assert_eq!(chain_type, ChainType::Relaychain); assert_eq!(chain_type.requires_proof_recording(), false); @@ -703,8 +709,9 @@ mod tests { let code_bytes = cumulus_test_runtime::WASM_BINARY .expect("To run this test, build the wasm binary of cumulus-test-runtime") .to_vec(); - let metadata = + let opaque_metadata = super::fetch_latest_metadata_from_code_blob(&executor, code_bytes.into()).unwrap(); + let metadata = subxt::Metadata::decode(&mut (*opaque_metadata).as_slice()).unwrap(); let chain_type = identify_chain(&metadata, Some(100)); assert_eq!(chain_type, ChainType::Parachain(100)); assert!(chain_type.requires_proof_recording()); @@ -717,8 +724,9 @@ mod tests { let code_bytes = substrate_test_runtime::WASM_BINARY .expect("To run this test, build the wasm binary of substrate-test-runtime") .to_vec(); - let metadata = + let opaque_metadata = super::fetch_latest_metadata_from_code_blob(&executor, code_bytes.into()).unwrap(); + let metadata = subxt::Metadata::decode(&mut (*opaque_metadata).as_slice()).unwrap(); let chain_type = identify_chain(&metadata, None); assert_eq!(chain_type, ChainType::Unknown); assert_eq!(chain_type.requires_proof_recording(), false); diff --git a/substrate/utils/frame/benchmarking-cli/src/overhead/mod.rs b/substrate/utils/frame/benchmarking-cli/src/overhead/mod.rs index 89c23d1fb6c1..de524d9ebc18 100644 --- a/substrate/utils/frame/benchmarking-cli/src/overhead/mod.rs +++ b/substrate/utils/frame/benchmarking-cli/src/overhead/mod.rs @@ -20,6 +20,5 @@ pub mod template; mod fake_runtime_api; pub mod remark_builder; -pub mod runtime_utilities; pub use command::{OpaqueBlock, OverheadCmd}; diff --git a/substrate/utils/frame/benchmarking-cli/src/overhead/remark_builder.rs b/substrate/utils/frame/benchmarking-cli/src/overhead/remark_builder.rs index a1d5f282d9f8..3a2d8776d1e1 100644 --- a/substrate/utils/frame/benchmarking-cli/src/overhead/remark_builder.rs +++ b/substrate/utils/frame/benchmarking-cli/src/overhead/remark_builder.rs @@ -54,13 +54,15 @@ impl> DynamicRemarkBuilder { log::debug!("Found metadata API version {}.", metadata_api_version); let opaque_metadata = if metadata_api_version > 1 { - let Ok(mut supported_metadata_versions) = api.metadata_versions(genesis) else { + let Ok(supported_metadata_versions) = api.metadata_versions(genesis) else { return Err("Unable to fetch metadata versions".to_string().into()); }; let latest = supported_metadata_versions - .pop() - .ok_or("No metadata version supported".to_string())?; + .into_iter() + .filter(|v| *v != u32::MAX) + .max() + .ok_or("No stable metadata versions supported".to_string())?; api.metadata_at_version(genesis, latest) .map_err(|e| format!("Unable to fetch metadata: {:?}", e))? 
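The `DynamicRemarkBuilder` change above is needed because the runtime's `metadata_versions` call now also reports the unstable `u32::MAX` placeholder, so simply popping the last element would select unstable metadata. A standalone sketch of the selection rule (hypothetical helper name, not part of this diff):

// Pick the newest stable metadata version from a runtime-reported list.
fn latest_stable_metadata_version(mut supported: Vec<u32>) -> Option<u32> {
    // Unstable metadata is advertised as `u32::MAX` until stabilized, so drop
    // it before taking the maximum of the remaining versions.
    supported.retain(|v| *v != u32::MAX);
    supported.into_iter().max()
}

// e.g. `latest_stable_metadata_version(vec![14, 15, u32::MAX])` yields `Some(15)`.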
diff --git a/substrate/utils/frame/rpc/system/src/lib.rs b/substrate/utils/frame/rpc/system/src/lib.rs index 824c871a3562..e1b3994c03dd 100644 --- a/substrate/utils/frame/rpc/system/src/lib.rs +++ b/substrate/utils/frame/rpc/system/src/lib.rs @@ -224,7 +224,7 @@ mod tests { transaction_validity::{InvalidTransaction, TransactionValidityError}, ApplyExtrinsicResult, }; - use substrate_test_runtime_client::{runtime::Transfer, AccountKeyring}; + use substrate_test_runtime_client::{runtime::Transfer, Sr25519Keyring}; fn deny_unsafe() -> Extensions { let mut ext = Extensions::new(); @@ -256,8 +256,8 @@ mod tests { let source = sp_runtime::transaction_validity::TransactionSource::External; let new_transaction = |nonce: u64| { let t = Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Bob.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Bob.into(), amount: 5, nonce, }; @@ -273,7 +273,7 @@ mod tests { let accounts = System::new(client, pool); // when - let nonce = accounts.nonce(AccountKeyring::Alice.into()).await; + let nonce = accounts.nonce(Sr25519Keyring::Alice.into()).await; // then assert_eq!(nonce.unwrap(), 2); @@ -321,8 +321,8 @@ mod tests { let accounts = System::new(client, pool); let tx = Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Bob.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Bob.into(), amount: 5, nonce: 0, } @@ -357,8 +357,8 @@ mod tests { let accounts = System::new(client, pool); let tx = Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Bob.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Bob.into(), amount: 5, nonce: 100, } diff --git a/substrate/utils/wasm-builder/Cargo.toml b/substrate/utils/wasm-builder/Cargo.toml index 8f0e8a23e54a..fb15e8619a38 100644 --- a/substrate/utils/wasm-builder/Cargo.toml +++ b/substrate/utils/wasm-builder/Cargo.toml @@ -35,7 +35,7 @@ sc-executor = { optional = true, workspace = true, default-features = true } sp-core = { optional = true, workspace = true, default-features = true } sp-io = { optional = true, workspace = true, default-features = true } sp-version = { optional = true, workspace = true, default-features = true } -frame-metadata = { features = ["current"], optional = true, workspace = true, default-features = true } +frame-metadata = { features = ["current", "unstable"], optional = true, workspace = true, default-features = true } codec = { optional = true, workspace = true, default-features = true } array-bytes = { optional = true, workspace = true, default-features = true } sp-tracing = { optional = true, workspace = true, default-features = true } diff --git a/substrate/utils/wasm-builder/src/builder.rs b/substrate/utils/wasm-builder/src/builder.rs index a40aafe1d812..5bdc743eac31 100644 --- a/substrate/utils/wasm-builder/src/builder.rs +++ b/substrate/utils/wasm-builder/src/builder.rs @@ -235,7 +235,8 @@ impl WasmBuilder { /// Build the WASM binary. pub fn build(mut self) { - let target = crate::runtime_target(); + let target = RuntimeTarget::new(); + if target == RuntimeTarget::Wasm { if self.export_heap_base { self.rust_flags.push("-Clink-arg=--export=__heap_base".into()); diff --git a/substrate/utils/wasm-builder/src/lib.rs b/substrate/utils/wasm-builder/src/lib.rs index 420ecd63e1dc..ce90f492e08f 100644 --- a/substrate/utils/wasm-builder/src/lib.rs +++ b/substrate/utils/wasm-builder/src/lib.rs @@ -112,7 +112,6 @@ //! wasm32-unknown-unknown --toolchain nightly-2020-02-20`. 
use std::{ - collections::BTreeSet, env, fs, io::BufRead, path::{Path, PathBuf}, @@ -254,26 +253,22 @@ struct CargoCommand { program: String, args: Vec, version: Option, - target_list: Option>, } impl CargoCommand { fn new(program: &str) -> Self { let version = Self::extract_version(program, &[]); - let target_list = Self::extract_target_list(program, &[]); - CargoCommand { program: program.into(), args: Vec::new(), version, target_list } + CargoCommand { program: program.into(), args: Vec::new(), version } } fn new_with_args(program: &str, args: &[&str]) -> Self { let version = Self::extract_version(program, args); - let target_list = Self::extract_target_list(program, args); CargoCommand { program: program.into(), args: args.iter().map(ToString::to_string).collect(), version, - target_list, } } @@ -294,23 +289,6 @@ impl CargoCommand { Version::extract(&version) } - fn extract_target_list(program: &str, args: &[&str]) -> Option> { - // This is technically an unstable option, but we don't care because we only need this - // to build RISC-V runtimes, and those currently require a specific nightly toolchain - // anyway, so it's totally fine for this to fail in other cases. - let list = Command::new(program) - .args(args) - .args(&["rustc", "-Z", "unstable-options", "--print", "target-list"]) - // Make sure if we're called from within a `build.rs` the host toolchain won't override - // a rustup toolchain we've picked. - .env_remove("RUSTC") - .output() - .ok() - .and_then(|o| String::from_utf8(o.stdout).ok())?; - - Some(list.trim().split("\n").map(ToString::to_string).collect()) - } - /// Returns the version of this cargo command or `None` if it failed to extract the version. fn version(&self) -> Option { self.version @@ -326,19 +304,10 @@ impl CargoCommand { fn supports_substrate_runtime_env(&self, target: RuntimeTarget) -> bool { match target { RuntimeTarget::Wasm => self.supports_substrate_runtime_env_wasm(), - RuntimeTarget::Riscv => self.supports_substrate_runtime_env_riscv(), + RuntimeTarget::Riscv => true, } } - /// Check if the supplied cargo command supports our RISC-V runtime environment. - fn supports_substrate_runtime_env_riscv(&self) -> bool { - let Some(target_list) = self.target_list.as_ref() else { return false }; - // This is our custom target which currently doesn't exist on any upstream toolchain, - // so if it exists it's guaranteed to be our custom toolchain and have have everything - // we need, so any further version checks are unnecessary at this point. - target_list.contains("riscv32ema-unknown-none-elf") - } - /// Check if the supplied cargo command supports our Substrate wasm environment. /// /// This means that either the cargo version is at minimum 1.68.0 or this is a nightly cargo. @@ -409,13 +378,6 @@ fn get_bool_environment_variable(name: &str) -> Option { } } -/// Returns whether we need to also compile the standard library when compiling the runtime. -fn build_std_required() -> bool { - let default = runtime_target() == RuntimeTarget::Wasm; - - crate::get_bool_environment_variable(crate::WASM_BUILD_STD).unwrap_or(default) -} - #[derive(Copy, Clone, PartialEq, Eq)] enum RuntimeTarget { Wasm, @@ -423,36 +385,55 @@ enum RuntimeTarget { } impl RuntimeTarget { - fn rustc_target(self) -> &'static str { + /// Creates a new instance. 
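+ /// Reads the `RUNTIME_TARGET` environment variable: when unset it defaults
+ /// to `Wasm`, and any value other than "wasm" or "riscv" aborts the build
+ /// with a warning.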
+ fn new() -> Self { + let Some(value) = env::var_os(RUNTIME_TARGET) else { + return Self::Wasm; + }; + + if value == "wasm" { + Self::Wasm + } else if value == "riscv" { + Self::Riscv + } else { + build_helper::warning!( + "RUNTIME_TARGET environment variable must be set to either \"wasm\" or \"riscv\"" + ); + std::process::exit(1); + } + } + + /// Figures out the target parameter value for rustc. + fn rustc_target(self) -> String { match self { - RuntimeTarget::Wasm => "wasm32-unknown-unknown", - RuntimeTarget::Riscv => "riscv32ema-unknown-none-elf", + RuntimeTarget::Wasm => "wasm32-unknown-unknown".to_string(), + RuntimeTarget::Riscv => { + let path = polkavm_linker::target_json_32_path().expect("riscv not found"); + path.into_os_string().into_string().unwrap() + }, } } - fn build_subdirectory(self) -> &'static str { - // Keep the build directories separate so that when switching between - // the targets we won't trigger unnecessary rebuilds. + /// Figures out the target directory name used by cargo. + fn rustc_target_dir(self) -> &'static str { match self { - RuntimeTarget::Wasm => "wbuild", - RuntimeTarget::Riscv => "rbuild", + RuntimeTarget::Wasm => "wasm32-unknown-unknown", + RuntimeTarget::Riscv => "riscv32emac-unknown-none-polkavm", } } -} -fn runtime_target() -> RuntimeTarget { - let Some(value) = env::var_os(RUNTIME_TARGET) else { - return RuntimeTarget::Wasm; - }; + /// Figures out the build-std argument. + fn rustc_target_build_std(self) -> Option<&'static str> { + if !crate::get_bool_environment_variable(crate::WASM_BUILD_STD).unwrap_or(true) { + return None; + } - if value == "wasm" { - RuntimeTarget::Wasm - } else if value == "riscv" { - RuntimeTarget::Riscv - } else { - build_helper::warning!( - "the '{RUNTIME_TARGET}' environment variable has an invalid value; it must be either 'wasm' or 'riscv'" - ); - std::process::exit(1); + // This is a nightly-only flag. + let arg = match self { + RuntimeTarget::Wasm => "build-std", + RuntimeTarget::Riscv => "build-std=core,alloc", + }; + + Some(arg) } } diff --git a/substrate/utils/wasm-builder/src/prerequisites.rs b/substrate/utils/wasm-builder/src/prerequisites.rs index 4de6b87f618d..9abfd1725237 100644 --- a/substrate/utils/wasm-builder/src/prerequisites.rs +++ b/substrate/utils/wasm-builder/src/prerequisites.rs @@ -196,11 +196,14 @@ fn check_wasm_toolchain_installed( error, colorize_aux_message(&"-".repeat(60)), )) - } + }; } let version = dummy_crate.get_rustc_version(); - if crate::build_std_required() { + + let target = RuntimeTarget::new(); + assert!(target == RuntimeTarget::Wasm); + if target.rustc_target_build_std().is_some() { if let Some(sysroot) = dummy_crate.get_sysroot() { let src_path = Path::new(sysroot.trim()).join("lib").join("rustlib").join("src").join("rust"); diff --git a/substrate/utils/wasm-builder/src/wasm_project.rs b/substrate/utils/wasm-builder/src/wasm_project.rs index 26edd2ea1f22..6530e4c22fb9 100644 --- a/substrate/utils/wasm-builder/src/wasm_project.rs +++ b/substrate/utils/wasm-builder/src/wasm_project.rs @@ -109,6 +109,15 @@ fn crate_metadata(cargo_manifest: &Path) -> Metadata { crate_metadata } +/// Keep the build directories separate so that when switching between the +/// targets we won't trigger unnecessary rebuilds. +fn build_subdirectory(target: RuntimeTarget) -> &'static str { + match target { + RuntimeTarget::Wasm => "wbuild", + RuntimeTarget::Riscv => "rbuild", + } +} + /// Creates the WASM project, compiles the WASM binary and compacts the WASM binary. 
/// /// # Returns @@ -125,7 +134,7 @@ pub(crate) fn create_and_compile( #[cfg(feature = "metadata-hash")] enable_metadata_hash: Option, ) -> (Option, WasmBinaryBloaty) { let runtime_workspace_root = get_wasm_workspace_root(); - let runtime_workspace = runtime_workspace_root.join(target.build_subdirectory()); + let runtime_workspace = runtime_workspace_root.join(build_subdirectory(target)); let crate_metadata = crate_metadata(orig_project_cargo_toml); @@ -770,7 +779,7 @@ impl BuildConfiguration { .collect::>() .iter() .rev() - .take_while(|c| c.as_os_str() != target.build_subdirectory()) + .take_while(|c| c.as_os_str() != build_subdirectory(target)) .last() .expect("We put the runtime project within a `target/.../[rw]build` path; qed") .as_os_str() @@ -841,9 +850,7 @@ fn build_bloaty_blob( "-C target-cpu=mvp -C target-feature=-sign-ext -C link-arg=--export-table ", ); }, - RuntimeTarget::Riscv => { - rustflags.push_str("-C target-feature=+lui-addi-fusion -C relocation-model=pie -C link-arg=--emit-relocs -C link-arg=--unique "); - }, + RuntimeTarget::Riscv => (), } rustflags.push_str(default_rustflags); @@ -907,10 +914,9 @@ fn build_bloaty_blob( // // So here we force the compiler to also compile the standard library crates for us // to make sure that they also only use the MVP features. - if crate::build_std_required() { - // Unfortunately this is still a nightly-only flag, but FWIW it is pretty widely used - // so it's unlikely to break without a replacement. - build_cmd.arg("-Z").arg("build-std"); + if let Some(arg) = target.rustc_target_build_std() { + build_cmd.arg("-Z").arg(arg); + if !cargo_cmd.supports_nightly_features() { build_cmd.env("RUSTC_BOOTSTRAP", "1"); } @@ -934,7 +940,7 @@ fn build_bloaty_blob( let blob_name = get_blob_name(target, &manifest_path); let target_directory = project .join("target") - .join(target.rustc_target()) + .join(target.rustc_target_dir()) .join(blob_build_profile.directory()); match target { RuntimeTarget::Riscv => { @@ -968,7 +974,7 @@ fn build_bloaty_blob( }, }; - std::fs::write(&polkavm_path, program.as_bytes()) + std::fs::write(&polkavm_path, program) .expect("writing the blob to a file always works"); } diff --git a/templates/minimal/README.md b/templates/minimal/README.md index cf43d71d8849..22f396c243ef 100644 --- a/templates/minimal/README.md +++ b/templates/minimal/README.md @@ -105,12 +105,11 @@ Omni Node, nonetheless. #### Run Omni Node -Start Omni Node with manual seal (3 seconds block times), minimal template runtime based -chain spec. We'll use `--tmp` flag to start the node with its configurations stored in a -temporary directory, which will be deleted at the end of the process. +Start Omni Node in development mode (sets up block production and finalization based on manual seal, +sealing a new block every 3 seconds), with a minimal template runtime chain spec. ```sh -polkadot-omni-node --chain --dev-block-time 3000 --tmp +polkadot-omni-node --chain --dev ``` ### Minimal Template Node @@ -160,7 +159,7 @@ Then make the changes in the network specification like so: # ... chain = "dev" chain_spec_path = "" -default_args = ["--dev-block-time 3000"] +default_args = ["--dev"] # .. 
``` diff --git a/templates/minimal/runtime/src/lib.rs b/templates/minimal/runtime/src/lib.rs index 7b8449f2abe4..72eded5bfd13 100644 --- a/templates/minimal/runtime/src/lib.rs +++ b/templates/minimal/runtime/src/lib.rs @@ -41,7 +41,7 @@ pub mod genesis_config_presets { use super::*; use crate::{ interface::{Balance, MinimumBalance}, - sp_keyring::AccountKeyring, + sp_keyring::Sr25519Keyring, BalancesConfig, RuntimeGenesisConfig, SudoConfig, }; @@ -53,11 +53,11 @@ pub mod genesis_config_presets { let endowment = >::get().max(1) * 1000; frame_support::build_struct_json_patch!(RuntimeGenesisConfig { balances: BalancesConfig { - balances: AccountKeyring::iter() + balances: Sr25519Keyring::iter() .map(|a| (a.to_account_id(), endowment)) .collect::>(), }, - sudo: SudoConfig { key: Some(AccountKeyring::Alice.to_account_id()) }, + sudo: SudoConfig { key: Some(Sr25519Keyring::Alice.to_account_id()) }, }) } diff --git a/templates/minimal/zombienet-omni-node.toml b/templates/minimal/zombienet-omni-node.toml index 33b0fceba68c..acd5b121c674 100644 --- a/templates/minimal/zombienet-omni-node.toml +++ b/templates/minimal/zombienet-omni-node.toml @@ -2,7 +2,7 @@ default_command = "polkadot-omni-node" chain = "dev" chain_spec_path = "" -default_args = ["--dev-block-time 3000"] +default_args = ["--dev"] [[relaychain.nodes]] name = "alice" diff --git a/templates/parachain/README.md b/templates/parachain/README.md index 65a6979041f2..c1e333df9e9e 100644 --- a/templates/parachain/README.md +++ b/templates/parachain/README.md @@ -27,6 +27,7 @@ - [Connect with the Polkadot-JS Apps Front-End](#connect-with-the-polkadot-js-apps-front-end) - [Takeaways](#takeaways) +- [Runtime development](#runtime-development) - [Contributing](#contributing) - [Getting Help](#getting-help) @@ -107,13 +108,11 @@ with the relay chain ID where this instantiation of parachain-template will conn #### Run Omni Node -Start Omni Node with the generated chain spec. We'll start it development mode (without a relay chain config), -with a temporary directory for configuration (given `--tmp`), and block production set to create a block with -every second. +Start Omni Node with the generated chain spec. We'll start it in development mode (without a relay chain config), producing +and finalizing blocks based on manual seal, configured below to seal a block every second. ```bash -polkadot-omni-node --chain --tmp --dev-block-time 1000 - +polkadot-omni-node --chain --dev --dev-block-time 1000 ``` However, such a setup is not close to what would run in production, and for that we need to setup a local @@ -197,6 +196,37 @@ Development parachains: - 💰 Are preconfigured with a genesis state that includes several prefunded development accounts. - 🧑‍⚖️ Development accounts are used as validators, collators, and `sudo` accounts. +## Runtime development + +We recommend using [`chopsticks`](https://github.com/AcalaNetwork/chopsticks) when the focus is on runtime +development and `OmniNode` is sufficient as is. + +### Install chopsticks + +To use `chopsticks`, please install the latest version according to the installation [guide](https://github.com/AcalaNetwork/chopsticks?tab=readme-ov-file#install). 
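+
+To confirm the installation worked, you can ask `chopsticks` for its help output (recent versions support `--help`):
+
+```sh
+npx @acala-network/chopsticks@latest --help
+```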
+ +### Build a raw chain spec + +Build the `parachain-template-runtime` as mentioned before in this guide and use `chain-spec-builder` +again, but this time passing the `--raw-storage` flag: + +```sh +chain-spec-builder create --raw-storage --relay-chain "rococo-local" --para-id 1000 --runtime \ + target/release/wbuild/parachain-template-runtime/parachain_template_runtime.wasm named-preset development +``` + +### Start `chopsticks` with the chain spec + +```sh +npx @acala-network/chopsticks@latest --chain-spec +``` + +### Alternatives + +`OmniNode` can still be used for runtime development via the `--dev` flag, while `parachain-template-node` doesn't +support it at the moment. The latter can still be used to test a runtime in a full setup where it is started alongside a +relay chain network (see [Parachain Template node](#parachain-template-node) setup). + ## Contributing - 🔄 This template is automatically updated after releases in the main [Polkadot SDK monorepo](https://github.com/paritytech/polkadot-sdk). diff --git a/templates/parachain/node/src/chain_spec.rs b/templates/parachain/node/src/chain_spec.rs index 55a099dd022b..d4b3a41b8969 100644 --- a/templates/parachain/node/src/chain_spec.rs +++ b/templates/parachain/node/src/chain_spec.rs @@ -7,6 +7,8 @@ use serde::{Deserialize, Serialize}; /// Specialized `ChainSpec` for the normal parachain runtime. pub type ChainSpec = sc_service::GenericChainSpec<Extensions>; +/// The relay chain that you want to configure this parachain to connect to. +pub const RELAY_CHAIN: &str = "rococo-local"; /// The extensions for the [`ChainSpec`]. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, ChainSpecGroup, ChainSpecExtension)] @@ -35,16 +37,13 @@ pub fn development_chain_spec() -> ChainSpec { ChainSpec::builder( runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), - Extensions { - relay_chain: "rococo-local".into(), - // You MUST set this to the correct network! - para_id: 1000, - }, + Extensions { relay_chain: RELAY_CHAIN.into(), para_id: runtime::PARACHAIN_ID }, ) .with_name("Development") .with_id("dev") .with_chain_type(ChainType::Development) .with_genesis_config_preset_name(sp_genesis_builder::DEV_RUNTIME_PRESET) + .with_properties(properties) .build() } @@ -58,11 +57,7 @@ pub fn local_chain_spec() -> ChainSpec { #[allow(deprecated)] ChainSpec::builder( runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), - Extensions { - relay_chain: "rococo-local".into(), - // You MUST set this to the correct network! - para_id: 1000, - }, + Extensions { relay_chain: RELAY_CHAIN.into(), para_id: runtime::PARACHAIN_ID }, ) .with_name("Local Testnet") .with_id("local_testnet") diff --git a/templates/parachain/runtime/src/genesis_config_presets.rs b/templates/parachain/runtime/src/genesis_config_presets.rs index aa1ff7895eb8..f1b24e437247 100644 --- a/templates/parachain/runtime/src/genesis_config_presets.rs +++ b/templates/parachain/runtime/src/genesis_config_presets.rs @@ -16,8 +16,8 @@ use sp_keyring::Sr25519Keyring; /// The default XCM version to set in genesis config. const SAFE_XCM_VERSION: u32 = xcm::prelude::XCM_VERSION; -/// Parachain id used for gensis config presets of parachain template. -const PARACHAIN_ID: u32 = 1000; +/// Parachain id used for genesis config presets of parachain template. +pub const PARACHAIN_ID: u32 = 1000; /// Generate the session keys from individual elements. 
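With `RELAY_CHAIN` and the now-public `PARACHAIN_ID` in place, both chain spec builders above draw their extension values from single constants instead of repeating literals. A simplified sketch of the resulting pattern (the `Extensions` struct here is a stand-in; the real one also derives the chain-spec traits shown above):

```rust
/// Stand-ins for the constants introduced in the diff above.
pub const RELAY_CHAIN: &str = "rococo-local";
pub const PARACHAIN_ID: u32 = 1000;

/// Simplified stand-in for the template's `Extensions` type.
struct Extensions {
    relay_chain: String,
    para_id: u32,
}

fn extensions() -> Extensions {
    // Every chain spec reuses the same constants, so retargeting the relay
    // chain or para id is a one-line change.
    Extensions { relay_chain: RELAY_CHAIN.into(), para_id: PARACHAIN_ID }
}

fn main() {
    let ext = extensions();
    assert_eq!(ext.relay_chain, "rococo-local");
    assert_eq!(ext.para_id, 1000);
}
```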
/// diff --git a/templates/parachain/runtime/src/lib.rs b/templates/parachain/runtime/src/lib.rs index 43e76dba0591..9669237af785 100644 --- a/templates/parachain/runtime/src/lib.rs +++ b/templates/parachain/runtime/src/lib.rs @@ -33,6 +33,7 @@ use frame_support::weights::{ constants::WEIGHT_REF_TIME_PER_SECOND, Weight, WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial, }; +pub use genesis_config_presets::PARACHAIN_ID; pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; pub use sp_runtime::{MultiAddress, Perbill, Permill}; diff --git a/templates/solochain/runtime/src/genesis_config_presets.rs b/templates/solochain/runtime/src/genesis_config_presets.rs index 049f4593451b..6af8dc9cd18a 100644 --- a/templates/solochain/runtime/src/genesis_config_presets.rs +++ b/templates/solochain/runtime/src/genesis_config_presets.rs @@ -22,7 +22,7 @@ use serde_json::Value; use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_consensus_grandpa::AuthorityId as GrandpaId; use sp_genesis_builder::{self, PresetId}; -use sp_keyring::AccountKeyring; +use sp_keyring::Sr25519Keyring; // Returns the genesis config presets populated with given parameters. fn testnet_genesis( @@ -56,12 +56,12 @@ pub fn development_config_genesis() -> Value { sp_keyring::Ed25519Keyring::Alice.public().into(), )], vec![ - AccountKeyring::Alice.to_account_id(), - AccountKeyring::Bob.to_account_id(), - AccountKeyring::AliceStash.to_account_id(), - AccountKeyring::BobStash.to_account_id(), + Sr25519Keyring::Alice.to_account_id(), + Sr25519Keyring::Bob.to_account_id(), + Sr25519Keyring::AliceStash.to_account_id(), + Sr25519Keyring::BobStash.to_account_id(), ], - sp_keyring::AccountKeyring::Alice.to_account_id(), + sp_keyring::Sr25519Keyring::Alice.to_account_id(), ) } @@ -78,11 +78,11 @@ pub fn local_config_genesis() -> Value { sp_keyring::Ed25519Keyring::Bob.public().into(), ), ], - AccountKeyring::iter() - .filter(|v| v != &AccountKeyring::One && v != &AccountKeyring::Two) + Sr25519Keyring::iter() + .filter(|v| v != &Sr25519Keyring::One && v != &Sr25519Keyring::Two) .map(|v| v.to_account_id()) .collect::<Vec<_>>(), - AccountKeyring::Alice.to_account_id(), + Sr25519Keyring::Alice.to_account_id(), ) } diff --git a/umbrella/Cargo.toml b/umbrella/Cargo.toml index 7f50658c4e16..68d71b4a5d5e 100644 --- a/umbrella/Cargo.toml +++ b/umbrella/Cargo.toml @@ -120,7 +120,6 @@ std = [ "pallet-recovery?/std", "pallet-referenda?/std", "pallet-remark?/std", - "pallet-revive-fixtures?/std", "pallet-revive-mock-network?/std", "pallet-revive?/std", "pallet-root-offences?/std", @@ -363,6 +362,7 @@ runtime-benchmarks = [ "staging-node-inspect?/runtime-benchmarks", "staging-xcm-builder?/runtime-benchmarks", "staging-xcm-executor?/runtime-benchmarks", + "staging-xcm?/runtime-benchmarks", "xcm-runtime-apis?/runtime-benchmarks", ] try-runtime = [ @@ -541,7 +541,7 @@ with-tracing = [ "sp-tracing?/with-tracing", "sp-tracing?/with-tracing", ] -runtime-full = ["assets-common", "binary-merkle-tree", "bp-header-chain", "bp-messages", "bp-parachains", "bp-polkadot", "bp-polkadot-core", "bp-relayers", "bp-runtime", "bp-test-utils", "bp-xcm-bridge-hub", "bp-xcm-bridge-hub-router", "bridge-hub-common", "bridge-runtime-common", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-parachain-system-proc-macro", "cumulus-pallet-session-benchmarking", "cumulus-pallet-solo-to-para", "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-ping", "cumulus-primitives-aura", 
"cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-primitives-proof-size-hostfunction", "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-timestamp", "cumulus-primitives-utility", "frame-benchmarking", "frame-benchmarking-pallet-pov", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-executive", "frame-metadata-hash-extension", "frame-support", "frame-support-procedural", "frame-support-procedural-tools-derive", "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", "pallet-alliance", "pallet-asset-conversion", "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-asset-rate", "pallet-asset-tx-payment", "pallet-assets", "pallet-assets-freezer", "pallet-atomic-swap", "pallet-aura", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", "pallet-bags-list", "pallet-balances", "pallet-beefy", "pallet-beefy-mmr", "pallet-bounties", "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-bridge-parachains", "pallet-bridge-relayers", "pallet-broker", "pallet-child-bounties", "pallet-collator-selection", "pallet-collective", "pallet-collective-content", "pallet-contracts", "pallet-contracts-proc-macro", "pallet-contracts-uapi", "pallet-conviction-voting", "pallet-core-fellowship", "pallet-delegated-staking", "pallet-democracy", "pallet-dev-mode", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", "pallet-fast-unstake", "pallet-glutton", "pallet-grandpa", "pallet-identity", "pallet-im-online", "pallet-indices", "pallet-insecure-randomness-collective-flip", "pallet-lottery", "pallet-membership", "pallet-message-queue", "pallet-migrations", "pallet-mixnet", "pallet-mmr", "pallet-multisig", "pallet-nft-fractionalization", "pallet-nfts", "pallet-nfts-runtime-api", "pallet-nis", "pallet-node-authorization", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", "pallet-nomination-pools-runtime-api", "pallet-offences", "pallet-offences-benchmarking", "pallet-paged-list", "pallet-parameters", "pallet-preimage", "pallet-proxy", "pallet-ranked-collective", "pallet-recovery", "pallet-referenda", "pallet-remark", "pallet-revive", "pallet-revive-fixtures", "pallet-revive-proc-macro", "pallet-revive-uapi", "pallet-root-offences", "pallet-root-testing", "pallet-safe-mode", "pallet-salary", "pallet-scheduler", "pallet-scored-pool", "pallet-session", "pallet-session-benchmarking", "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", "pallet-staking-reward-curve", "pallet-staking-reward-fn", "pallet-staking-runtime-api", "pallet-state-trie-migration", "pallet-statement", "pallet-sudo", "pallet-timestamp", "pallet-tips", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-transaction-storage", "pallet-treasury", "pallet-tx-pause", "pallet-uniques", "pallet-utility", "pallet-verify-signature", "pallet-vesting", "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", "pallet-xcm-bridge-hub", "pallet-xcm-bridge-hub-router", "parachains-common", "polkadot-core-primitives", "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-metrics", "polkadot-runtime-parachains", "polkadot-sdk-frame", "sc-chain-spec-derive", "sc-tracing-proc-macro", "slot-range-helper", "snowbridge-beacon-primitives", "snowbridge-core", "snowbridge-ethereum", "snowbridge-outbound-queue-merkle-tree", 
"snowbridge-outbound-queue-runtime-api", "snowbridge-pallet-ethereum-client", "snowbridge-pallet-ethereum-client-fixtures", "snowbridge-pallet-inbound-queue", "snowbridge-pallet-inbound-queue-fixtures", "snowbridge-pallet-outbound-queue", "snowbridge-pallet-system", "snowbridge-router-primitives", "snowbridge-runtime-common", "snowbridge-system-runtime-api", "sp-api", "sp-api-proc-macro", "sp-application-crypto", "sp-arithmetic", "sp-authority-discovery", "sp-block-builder", "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-beefy", "sp-consensus-grandpa", "sp-consensus-pow", "sp-consensus-slots", "sp-core", "sp-crypto-ec-utils", "sp-crypto-hashing", "sp-crypto-hashing-proc-macro", "sp-debug-derive", "sp-externalities", "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", "sp-keystore", "sp-metadata-ir", "sp-mixnet", "sp-mmr-primitives", "sp-npos-elections", "sp-offchain", "sp-runtime", "sp-runtime-interface", "sp-runtime-interface-proc-macro", "sp-session", "sp-staking", "sp-state-machine", "sp-statement-store", "sp-std", "sp-storage", "sp-timestamp", "sp-tracing", "sp-transaction-pool", "sp-transaction-storage-proof", "sp-trie", "sp-version", "sp-version-proc-macro", "sp-wasm-interface", "sp-weights", "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", "substrate-bip39", "testnet-parachains-constants", "tracing-gum-proc-macro", "xcm-procedural", "xcm-runtime-apis"] +runtime-full = ["assets-common", "binary-merkle-tree", "bp-header-chain", "bp-messages", "bp-parachains", "bp-polkadot", "bp-polkadot-core", "bp-relayers", "bp-runtime", "bp-test-utils", "bp-xcm-bridge-hub", "bp-xcm-bridge-hub-router", "bridge-hub-common", "bridge-runtime-common", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-parachain-system-proc-macro", "cumulus-pallet-session-benchmarking", "cumulus-pallet-solo-to-para", "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-ping", "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-primitives-proof-size-hostfunction", "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-timestamp", "cumulus-primitives-utility", "frame-benchmarking", "frame-benchmarking-pallet-pov", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-executive", "frame-metadata-hash-extension", "frame-support", "frame-support-procedural", "frame-support-procedural-tools-derive", "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", "pallet-alliance", "pallet-asset-conversion", "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-asset-rate", "pallet-asset-tx-payment", "pallet-assets", "pallet-assets-freezer", "pallet-atomic-swap", "pallet-aura", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", "pallet-bags-list", "pallet-balances", "pallet-beefy", "pallet-beefy-mmr", "pallet-bounties", "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-bridge-parachains", "pallet-bridge-relayers", "pallet-broker", "pallet-child-bounties", "pallet-collator-selection", "pallet-collective", "pallet-collective-content", "pallet-contracts", "pallet-contracts-proc-macro", "pallet-contracts-uapi", "pallet-conviction-voting", "pallet-core-fellowship", "pallet-delegated-staking", "pallet-democracy", "pallet-dev-mode", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", 
"pallet-fast-unstake", "pallet-glutton", "pallet-grandpa", "pallet-identity", "pallet-im-online", "pallet-indices", "pallet-insecure-randomness-collective-flip", "pallet-lottery", "pallet-membership", "pallet-message-queue", "pallet-migrations", "pallet-mixnet", "pallet-mmr", "pallet-multisig", "pallet-nft-fractionalization", "pallet-nfts", "pallet-nfts-runtime-api", "pallet-nis", "pallet-node-authorization", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", "pallet-nomination-pools-runtime-api", "pallet-offences", "pallet-offences-benchmarking", "pallet-paged-list", "pallet-parameters", "pallet-preimage", "pallet-proxy", "pallet-ranked-collective", "pallet-recovery", "pallet-referenda", "pallet-remark", "pallet-revive", "pallet-revive-proc-macro", "pallet-revive-uapi", "pallet-root-offences", "pallet-root-testing", "pallet-safe-mode", "pallet-salary", "pallet-scheduler", "pallet-scored-pool", "pallet-session", "pallet-session-benchmarking", "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", "pallet-staking-reward-curve", "pallet-staking-reward-fn", "pallet-staking-runtime-api", "pallet-state-trie-migration", "pallet-statement", "pallet-sudo", "pallet-timestamp", "pallet-tips", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-transaction-storage", "pallet-treasury", "pallet-tx-pause", "pallet-uniques", "pallet-utility", "pallet-verify-signature", "pallet-vesting", "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", "pallet-xcm-bridge-hub", "pallet-xcm-bridge-hub-router", "parachains-common", "polkadot-core-primitives", "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-metrics", "polkadot-runtime-parachains", "polkadot-sdk-frame", "sc-chain-spec-derive", "sc-tracing-proc-macro", "slot-range-helper", "snowbridge-beacon-primitives", "snowbridge-core", "snowbridge-ethereum", "snowbridge-outbound-queue-merkle-tree", "snowbridge-outbound-queue-runtime-api", "snowbridge-pallet-ethereum-client", "snowbridge-pallet-ethereum-client-fixtures", "snowbridge-pallet-inbound-queue", "snowbridge-pallet-inbound-queue-fixtures", "snowbridge-pallet-outbound-queue", "snowbridge-pallet-system", "snowbridge-router-primitives", "snowbridge-runtime-common", "snowbridge-system-runtime-api", "sp-api", "sp-api-proc-macro", "sp-application-crypto", "sp-arithmetic", "sp-authority-discovery", "sp-block-builder", "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-beefy", "sp-consensus-grandpa", "sp-consensus-pow", "sp-consensus-slots", "sp-core", "sp-crypto-ec-utils", "sp-crypto-hashing", "sp-crypto-hashing-proc-macro", "sp-debug-derive", "sp-externalities", "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", "sp-keystore", "sp-metadata-ir", "sp-mixnet", "sp-mmr-primitives", "sp-npos-elections", "sp-offchain", "sp-runtime", "sp-runtime-interface", "sp-runtime-interface-proc-macro", "sp-session", "sp-staking", "sp-state-machine", "sp-statement-store", "sp-std", "sp-storage", "sp-timestamp", "sp-tracing", "sp-transaction-pool", "sp-transaction-storage-proof", "sp-trie", "sp-version", "sp-version-proc-macro", "sp-wasm-interface", "sp-weights", "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", "substrate-bip39", "testnet-parachains-constants", "tracing-gum-proc-macro", "xcm-procedural", "xcm-runtime-apis"] runtime = [ "frame-benchmarking", "frame-benchmarking-pallet-pov", @@ -605,7 +605,7 @@ runtime = [ "sp-wasm-interface", "sp-weights", ] -node = 
["asset-test-utils", "bridge-hub-test-utils", "cumulus-client-cli", "cumulus-client-collator", "cumulus-client-consensus-aura", "cumulus-client-consensus-common", "cumulus-client-consensus-proposer", "cumulus-client-consensus-relay-chain", "cumulus-client-network", "cumulus-client-parachain-inherent", "cumulus-client-pov-recovery", "cumulus-client-service", "cumulus-relay-chain-inprocess-interface", "cumulus-relay-chain-interface", "cumulus-relay-chain-minimal-node", "cumulus-relay-chain-rpc-interface", "cumulus-test-relay-sproof-builder", "emulated-integration-tests-common", "fork-tree", "frame-benchmarking-cli", "frame-remote-externalities", "frame-support-procedural-tools", "generate-bags", "mmr-gadget", "mmr-rpc", "pallet-contracts-mock-network", "pallet-revive-eth-rpc", "pallet-revive-mock-network", "pallet-transaction-payment-rpc", "parachains-runtimes-test-utils", "polkadot-approval-distribution", "polkadot-availability-bitfield-distribution", "polkadot-availability-distribution", "polkadot-availability-recovery", "polkadot-cli", "polkadot-collator-protocol", "polkadot-dispute-distribution", "polkadot-erasure-coding", "polkadot-gossip-support", "polkadot-network-bridge", "polkadot-node-collation-generation", "polkadot-node-core-approval-voting", "polkadot-node-core-approval-voting-parallel", "polkadot-node-core-av-store", "polkadot-node-core-backing", "polkadot-node-core-bitfield-signing", "polkadot-node-core-candidate-validation", "polkadot-node-core-chain-api", "polkadot-node-core-chain-selection", "polkadot-node-core-dispute-coordinator", "polkadot-node-core-parachains-inherent", "polkadot-node-core-prospective-parachains", "polkadot-node-core-provisioner", "polkadot-node-core-pvf", "polkadot-node-core-pvf-checker", "polkadot-node-core-pvf-common", "polkadot-node-core-pvf-execute-worker", "polkadot-node-core-pvf-prepare-worker", "polkadot-node-core-runtime-api", "polkadot-node-metrics", "polkadot-node-network-protocol", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-types", "polkadot-node-subsystem-util", "polkadot-omni-node-lib", "polkadot-overseer", "polkadot-rpc", "polkadot-service", "polkadot-statement-distribution", "polkadot-statement-table", "sc-allocator", "sc-authority-discovery", "sc-basic-authorship", "sc-block-builder", "sc-chain-spec", "sc-cli", "sc-client-api", "sc-client-db", "sc-consensus", "sc-consensus-aura", "sc-consensus-babe", "sc-consensus-babe-rpc", "sc-consensus-beefy", "sc-consensus-beefy-rpc", "sc-consensus-epochs", "sc-consensus-grandpa", "sc-consensus-grandpa-rpc", "sc-consensus-manual-seal", "sc-consensus-pow", "sc-consensus-slots", "sc-executor", "sc-executor-common", "sc-executor-polkavm", "sc-executor-wasmtime", "sc-informant", "sc-keystore", "sc-mixnet", "sc-network", "sc-network-common", "sc-network-gossip", "sc-network-light", "sc-network-statement", "sc-network-sync", "sc-network-transactions", "sc-network-types", "sc-offchain", "sc-proposer-metrics", "sc-rpc", "sc-rpc-api", "sc-rpc-server", "sc-rpc-spec-v2", "sc-service", "sc-state-db", "sc-statement-store", "sc-storage-monitor", "sc-sync-state-rpc", "sc-sysinfo", "sc-telemetry", "sc-tracing", "sc-transaction-pool", "sc-transaction-pool-api", "sc-utils", "snowbridge-runtime-test-common", "sp-blockchain", "sp-consensus", "sp-core-hashing", "sp-core-hashing-proc-macro", "sp-database", "sp-maybe-compressed-blob", "sp-panic-handler", "sp-rpc", "staging-chain-spec-builder", "staging-node-inspect", "staging-tracking-allocator", "std", "subkey", 
"substrate-build-script-utils", "substrate-frame-rpc-support", "substrate-frame-rpc-system", "substrate-prometheus-endpoint", "substrate-rpc-client", "substrate-state-trie-migration-rpc", "substrate-wasm-builder", "tracing-gum", "xcm-emulator", "xcm-simulator"] +node = ["asset-test-utils", "bridge-hub-test-utils", "cumulus-client-cli", "cumulus-client-collator", "cumulus-client-consensus-aura", "cumulus-client-consensus-common", "cumulus-client-consensus-proposer", "cumulus-client-consensus-relay-chain", "cumulus-client-network", "cumulus-client-parachain-inherent", "cumulus-client-pov-recovery", "cumulus-client-service", "cumulus-relay-chain-inprocess-interface", "cumulus-relay-chain-interface", "cumulus-relay-chain-minimal-node", "cumulus-relay-chain-rpc-interface", "cumulus-test-relay-sproof-builder", "emulated-integration-tests-common", "fork-tree", "frame-benchmarking-cli", "frame-remote-externalities", "frame-support-procedural-tools", "generate-bags", "mmr-gadget", "mmr-rpc", "pallet-contracts-mock-network", "pallet-revive-eth-rpc", "pallet-revive-mock-network", "pallet-transaction-payment-rpc", "parachains-runtimes-test-utils", "polkadot-approval-distribution", "polkadot-availability-bitfield-distribution", "polkadot-availability-distribution", "polkadot-availability-recovery", "polkadot-cli", "polkadot-collator-protocol", "polkadot-dispute-distribution", "polkadot-erasure-coding", "polkadot-gossip-support", "polkadot-network-bridge", "polkadot-node-collation-generation", "polkadot-node-core-approval-voting", "polkadot-node-core-approval-voting-parallel", "polkadot-node-core-av-store", "polkadot-node-core-backing", "polkadot-node-core-bitfield-signing", "polkadot-node-core-candidate-validation", "polkadot-node-core-chain-api", "polkadot-node-core-chain-selection", "polkadot-node-core-dispute-coordinator", "polkadot-node-core-parachains-inherent", "polkadot-node-core-prospective-parachains", "polkadot-node-core-provisioner", "polkadot-node-core-pvf", "polkadot-node-core-pvf-checker", "polkadot-node-core-pvf-common", "polkadot-node-core-pvf-execute-worker", "polkadot-node-core-pvf-prepare-worker", "polkadot-node-core-runtime-api", "polkadot-node-metrics", "polkadot-node-network-protocol", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-types", "polkadot-node-subsystem-util", "polkadot-omni-node-lib", "polkadot-overseer", "polkadot-rpc", "polkadot-service", "polkadot-statement-distribution", "polkadot-statement-table", "sc-allocator", "sc-authority-discovery", "sc-basic-authorship", "sc-block-builder", "sc-chain-spec", "sc-cli", "sc-client-api", "sc-client-db", "sc-consensus", "sc-consensus-aura", "sc-consensus-babe", "sc-consensus-babe-rpc", "sc-consensus-beefy", "sc-consensus-beefy-rpc", "sc-consensus-epochs", "sc-consensus-grandpa", "sc-consensus-grandpa-rpc", "sc-consensus-manual-seal", "sc-consensus-pow", "sc-consensus-slots", "sc-executor", "sc-executor-common", "sc-executor-polkavm", "sc-executor-wasmtime", "sc-informant", "sc-keystore", "sc-mixnet", "sc-network", "sc-network-common", "sc-network-gossip", "sc-network-light", "sc-network-statement", "sc-network-sync", "sc-network-transactions", "sc-network-types", "sc-offchain", "sc-proposer-metrics", "sc-rpc", "sc-rpc-api", "sc-rpc-server", "sc-rpc-spec-v2", "sc-runtime-utilities", "sc-service", "sc-state-db", "sc-statement-store", "sc-storage-monitor", "sc-sync-state-rpc", "sc-sysinfo", "sc-telemetry", "sc-tracing", "sc-transaction-pool", "sc-transaction-pool-api", "sc-utils", 
"snowbridge-runtime-test-common", "sp-blockchain", "sp-consensus", "sp-core-hashing", "sp-core-hashing-proc-macro", "sp-database", "sp-maybe-compressed-blob", "sp-panic-handler", "sp-rpc", "staging-chain-spec-builder", "staging-node-inspect", "staging-tracking-allocator", "std", "subkey", "substrate-build-script-utils", "substrate-frame-rpc-support", "substrate-frame-rpc-system", "substrate-prometheus-endpoint", "substrate-rpc-client", "substrate-state-trie-migration-rpc", "substrate-wasm-builder", "tracing-gum", "xcm-emulator", "xcm-simulator"] tuples-96 = [ "frame-support-procedural?/tuples-96", "frame-support?/tuples-96", @@ -617,6 +617,12 @@ workspace = true [package.authors] workspace = true +[package.homepage] +workspace = true + +[package.repository] +workspace = true + [dependencies.assets-common] path = "../cumulus/parachains/runtimes/assets/common" default-features = false @@ -1187,11 +1193,6 @@ path = "../substrate/frame/revive" default-features = false optional = true -[dependencies.pallet-revive-fixtures] -path = "../substrate/frame/revive/fixtures" -default-features = false -optional = true - [dependencies.pallet-revive-proc-macro] path = "../substrate/frame/revive/proc-macro" default-features = false @@ -2327,6 +2328,11 @@ path = "../substrate/client/rpc-spec-v2" default-features = false optional = true +[dependencies.sc-runtime-utilities] +path = "../substrate/client/runtime-utilities" +default-features = false +optional = true + [dependencies.sc-service] path = "../substrate/client/service" default-features = false diff --git a/umbrella/src/lib.rs b/umbrella/src/lib.rs index 2216864fad0f..7b3c869588f0 100644 --- a/umbrella/src/lib.rs +++ b/umbrella/src/lib.rs @@ -584,10 +584,6 @@ pub use pallet_revive; #[cfg(feature = "pallet-revive-eth-rpc")] pub use pallet_revive_eth_rpc; -/// Fixtures for testing and benchmarking. -#[cfg(feature = "pallet-revive-fixtures")] -pub use pallet_revive_fixtures; - /// A mock network for testing pallet-revive. #[cfg(feature = "pallet-revive-mock-network")] pub use pallet_revive_mock_network; @@ -1123,6 +1119,10 @@ pub use sc_rpc_server; #[cfg(feature = "sc-rpc-spec-v2")] pub use sc_rpc_spec_v2; +/// Substrate client utilities for frame runtime functions calls. +#[cfg(feature = "sc-runtime-utilities")] +pub use sc_runtime_utilities; + /// Substrate service. Starts a thread that spins up the network, client, and extrinsic pool. /// Manages communication between them. #[cfg(feature = "sc-service")]